Dataset columns:
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
hexsha: debe9e4f5079c1767ee4a5874a12704d9b6439c7 | size: 808 | ext: py | lang: Python
max_stars_repo: path hackerVote/urls.py, name v786/hacker_vote, head_hexsha 7a1af94a99e2c4c2393147784c1acd2bcf0258ee, licenses ["MIT"], stars count null, stars event datetimes null
max_issues_repo: path hackerVote/urls.py, name v786/hacker_vote, head_hexsha 7a1af94a99e2c4c2393147784c1acd2bcf0258ee, licenses ["MIT"], issues count null, issues event datetimes null
max_forks_repo: path hackerVote/urls.py, name v786/hacker_vote, head_hexsha 7a1af94a99e2c4c2393147784c1acd2bcf0258ee, licenses ["MIT"], forks count null, forks event datetimes null
content:
"""hackerVote URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('hackers/', include('hackers.urls')),
path('admin/', admin.site.urls),
]
avg_line_length: 35.130435 | max_line_length: 77 | alphanum_fraction: 0.705446
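The project URLconf above hands everything under hackers/ to an app-level URLconf via include('hackers.urls'). That app module is not part of this record; as a rough sketch only, a minimal hackers/urls.py compatible with the include() call could look like the following (the views module and the index view are hypothetical names, not taken from the repository):

from django.urls import path
from . import views  # hypothetical app views module

urlpatterns = [
    # hypothetical route; the real hackers app's routes are not shown in this record
    path('', views.index, name='index'),
]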
hexsha: 3b1f900d24a318773800855a5dd3091b1cf3dfc9 | size: 3,066 | ext: py | lang: Python
max_stars_repo: path baracus/prepare.py, name jelman/baracus, head_hexsha d5d024af2209f91fb384b23bee8273c505f31b91, licenses ["Apache-2.0"], stars count 8, stars events 2017-06-26T22:46:09.000Z to 2020-08-28T05:08:34.000Z
max_issues_repo: path baracus/prepare.py, name jelman/baracus, head_hexsha d5d024af2209f91fb384b23bee8273c505f31b91, licenses ["Apache-2.0"], issues count 11, issues events 2017-06-08T10:19:24.000Z to 2020-05-13T13:11:52.000Z
max_forks_repo: path baracus/prepare.py, name jelman/baracus, head_hexsha d5d024af2209f91fb384b23bee8273c505f31b91, licenses ["Apache-2.0"], forks count 13, forks events 2017-06-08T10:20:46.000Z to 2022-03-30T22:06:03.000Z
content:
import os
from baracus.utils import run, run_fs_if_not_available
def prepare_fs_data(fs_dir, out_dir, subject):
out_files = downsample_surfs(fs_dir, out_dir, subject)
out_files["aseg" + "_file"] = prepare_aseg(fs_dir, out_dir, subject)
return out_files
def downsample_surfs(fs_dir, out_dir, subject, hemis=["lh", "rh"], meas=["thickness", "area"]):
out_files = {}
for m in meas:
for h in hemis:
subject_dir = os.path.join(out_dir, subject, "data")
if not os.path.isdir(subject_dir):
os.makedirs(subject_dir)
out_file = os.path.join(subject_dir, "{h}.{m}.mgh".format(h=h, m=m))
out_files[h + "_" + m + "_file"] = out_file
if not os.path.isfile(out_file):
cmd = "mris_preproc --s {subject} --target fsaverage4 --hemi {h} " \
"--meas {m} --out {out_file}".format(
subject=subject, h=h, m=m, out_file=out_file)
run(cmd, env={"SUBJECTS_DIR": fs_dir})
return out_files
def prepare_aseg(fs_dir, out_dir, subject):
subject_dir = os.path.join(out_dir, subject, "data")
if not os.path.isdir(subject_dir):
os.makedirs(subject_dir)
out_file = os.path.join(subject_dir, "aseg")
if not os.path.isfile(out_file):
cmd = "asegstats2table --subjects {subject} --meas volume --tablefile {out_file}".format(subject=subject,
out_file=out_file)
print(cmd)
run(cmd, env={"SUBJECTS_DIR": fs_dir})
return out_file
def run_prepare_all(bids_dir, freesurfer_dir, out_dir, subjects_to_analyze, sessions_to_analyze, n_cpus, license_key,
skip_missing=False):
"""
:param bids_dir:
:param freesurfer_dir:
:param out_dir:
:param subjects_to_analyze:
:param sessions_to_analyze: {"subject_label": ["test", "retest"],...}; {} if not truly_long_study
:return:
"""
fsav_dir = os.path.join(os.environ["FREESURFER_HOME"], "subjects")
for fsav in ["fsaverage", "fsaverage4"]:
if not os.path.exists(os.path.join(freesurfer_dir, fsav)):
cmd = "cp -rf {} {}".format(os.path.join(fsav_dir, fsav), os.path.join(freesurfer_dir, fsav))
run(cmd)
# check if freesurfer is available and run if missing
freesurfer_subjects = []
for subject in subjects_to_analyze:
sessions = sessions_to_analyze.get(subject)
freesurfer_subjects.extend(run_fs_if_not_available(bids_dir, freesurfer_dir, subject, license_key, n_cpus,
sessions, skip_missing))
# downsample surfaces to fsaverage4 and extract subcortical data from aseg
out_files = {}
for fs_subject in freesurfer_subjects:
print("preparing %s" % fs_subject)
out_files[fs_subject] = prepare_fs_data(freesurfer_dir, out_dir, fs_subject)
print("FINISHED. Prepared %s" % " ".join(freesurfer_subjects))
return out_files
avg_line_length: 41.432432 | max_line_length: 117 | alphanum_fraction: 0.6197
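For orientation, here is a rough usage sketch of run_prepare_all; every path, subject label, and the license key below are placeholder values, and sessions_to_analyze follows the format documented in the docstring (an empty dict when the study is not truly longitudinal). It assumes FREESURFER_HOME is set, since the function reads that environment variable directly:

from baracus.prepare import run_prepare_all

# All paths and labels here are hypothetical placeholders.
out_files = run_prepare_all(
    bids_dir="/data/bids",
    freesurfer_dir="/data/freesurfer",
    out_dir="/data/baracus",
    subjects_to_analyze=["sub-01", "sub-02"],
    sessions_to_analyze={},  # e.g. {"sub-01": ["test", "retest"]} for a truly longitudinal study
    n_cpus=4,
    license_key="/opt/freesurfer/license.txt",
    skip_missing=False,
)
# out_files maps each FreeSurfer subject to the paths of its downsampled surfaces and aseg table.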
hexsha: d90be90b5bbf7b1790d03b677d9c9f6a43fcf7a6 | size: 82,193 | ext: py | lang: Python
max_stars_repo: path torch/quantization/fx/quantization_patterns.py, name xiaomengy/pytorch, head_hexsha 5060b69d62943e11f053f8c86b8154a7d009af11, licenses ["Intel"], stars count 3, stars events 2019-01-21T12:15:39.000Z to 2019-06-08T13:59:44.000Z
max_issues_repo: path torch/quantization/fx/quantization_patterns.py, name BIT-silence/pytorch, head_hexsha 5060b69d62943e11f053f8c86b8154a7d009af11, licenses ["Intel"], issues count null, issues event datetimes null
max_forks_repo: path torch/quantization/fx/quantization_patterns.py, name BIT-silence/pytorch, head_hexsha 5060b69d62943e11f053f8c86b8154a7d009af11, licenses ["Intel"], forks count null, forks event datetimes null
content:
import torch
from torch.fx import GraphModule
from torch.fx.graph import (
Node,
Graph,
)
from torch.quantization import (
default_affine_fixed_qparams_fake_quant,
default_symmetric_fixed_qparams_fake_quant,
)
from ..quantization_mappings import (
get_static_quant_module_class,
get_dynamic_quant_module_class,
get_quantized_operator,
)
from ..utils import (
get_swapped_custom_module_class,
activation_is_statically_quantized,
activation_is_int8_quantized,
weight_is_statically_quantized,
get_qconfig_dtypes,
activation_dtype,
get_qparam_dict,
)
from torch.ao.quantization.quantize import (
is_activation_post_process,
)
from .pattern_utils import (
register_quant_pattern,
get_default_output_activation_post_process_map,
Pattern,
)
from .utils import (
_parent_name,
all_node_args_have_no_tensors,
quantize_node,
get_per_tensor_qparams,
get_linear_prepack_op_for_dtype,
create_qparam_nodes,
get_qconv_prepack_op,
get_qconv_op,
)
from ..qconfig import QConfigAny
from abc import ABC, abstractmethod
import operator
import warnings
from typing import Any, Callable, Dict, Union, Optional, Tuple, List
# -------------------------
# Pattern Registrations
# -------------------------
# 1. Post Training Static Quantization and Quantization Aware Training Patterns
# Base Pattern Handler
class QuantizeHandler(ABC):
""" Base handler class for the quantizer patterns
"""
def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):
""" Records pattern information in __init__, which will be used
in convert
"""
# this is an indicator of whether all the inputs are Node or not
# since some op might be quantized differently depending on whether
# all inputs are tensors or not, e.g. add/mul
self.num_tensor_args = len(node.args)
self.all_node_args_are_tensors = True
# the last node of the matched pattern
self.last_node = node
def _maybe_get_last_node_only_observer(
self,
modules: Dict[str, torch.nn.Module]
) -> Optional[torch.nn.Module]:
"""
If the last node of the pattern is observed, return the observer
instance. Otherwise, return None.
"""
for maybe_obs_node, _ in self.last_node.users.items():
if maybe_obs_node.op == 'call_module':
maybe_obs = modules[str(maybe_obs_node.target)]
if is_activation_post_process(maybe_obs):
return maybe_obs
return None
def input_output_observed(self) -> bool:
"""
Returns True if the pattern matched to this qhandler could be
observed, and False if it should not be observed.
"""
return True
def is_general_tensor_value_op(self) -> bool:
"""
Returns True if the operator works for both floating point and
quantized input, and does some computation based on the input Tensor,
so we need to insert observer/fake_quant for the output of the
operator since the distribution of values is different for input and output
Tensors (for HistogramObserver)
while they share the same quantization parameters
Example: avgpool2d
"""
return False
def is_general_tensor_shape_op(self) -> bool:
""" Similar to is_general_tensor_value_op, this is a check
for ops that work for both floating point and quantized input,
and that only re-arrange the Tensor values or query some metadata about the Tensor.
We don't insert observer/fake_quant for the output of these operators
Example: reshape, transpose, maxpool2d
"""
return False
def should_insert_observer_for_output(
self,
qconfig: Any,
model_is_training: bool,
) -> bool:
"""
Returns true if an observer should be inserted for the output of
the pattern matched to this QuantizeHandler instance during the
prepare step.
"""
# TODO(future PR): potentially clean up and deduplicate these
# mappings.
return self.all_node_args_are_tensors and self.input_output_observed()
def should_mark_output_quantized_from_input_quantized_status(
self,
qconfig: QConfigAny
) -> bool:
"""
Returns true if after convert, the output of the matched pattern is
quantized iff the first input is also quantized.
"""
return False
def get_activation_ctr(
self,
qconfig: Any,
pattern: Pattern,
) -> Optional[Callable]:
"""
Returns the constructor for the activation observer which should be
used for the pattern matched to this handler. Some handlers override
this to a different value than what is specified in the qconfig.
"""
return qconfig.activation
def is_output_quantized(self, qconfig, is_reference):
""" Returns true if the output node of convert is quantized
when is_reference is False, we would return float node when a certain dtype
combination is not supported (since fbgemm/qnnpack only support certain dtype
combinations), so the output may be float, but when is_reference is True,
we support all dtype combinations so the output will always be quantized.
"""
return True
@abstractmethod
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
""" Convert the given node to a quantized node and insert
it to the quantized graph
"""
return NotImplemented
# Binary op configs
# Supported combinations are:
# quant_type | activation (compute_type) | weight
# static quint8 qint8
# tuple (activation_dtype, weight_dtype, compute_dtype)
# these are supported types for common binary ops like add/mul etc.
all_dtypes = [
(torch.quint8, torch.qint8, None),
(torch.float16, torch.float16, None),
]
fp16_dtypes = [
(torch.float16, torch.float16, None)
]
int8_dtypes = [
(torch.quint8, torch.qint8, None),
]
binary_op_supported_dtypes : Dict[Union[Callable, str], List[Tuple[torch.dtype, torch.dtype, None]]] = {
operator.add: all_dtypes,
torch.add: all_dtypes,
operator.mul: all_dtypes,
torch.mul: all_dtypes,
torch.bmm: fp16_dtypes,
torch.sub: fp16_dtypes,
operator.sub: fp16_dtypes,
torch.div: fp16_dtypes,
operator.truediv: fp16_dtypes,
}
default_op_supported_dtypes = {
torch.nn.ConvTranspose1d: int8_dtypes,
torch.nn.ConvTranspose2d: int8_dtypes,
torch.nn.ELU: int8_dtypes,
torch.nn.LeakyReLU: int8_dtypes,
torch.nn.Hardswish: int8_dtypes,
torch.nn.InstanceNorm1d: int8_dtypes,
torch.nn.InstanceNorm2d: int8_dtypes,
torch.nn.InstanceNorm3d: int8_dtypes,
torch.nn.LayerNorm: all_dtypes,
torch.nn.SiLU: fp16_dtypes,
torch.nn.Mish: fp16_dtypes,
torch.nn.GELU: int8_dtypes,
torch.nn.Softmax: int8_dtypes,
torch.nn.functional.elu: int8_dtypes,
torch.nn.functional.hardswish: int8_dtypes,
torch.nn.functional.instance_norm: int8_dtypes,
torch.nn.functional.layer_norm: all_dtypes,
torch.nn.functional.leaky_relu: int8_dtypes,
torch.nn.functional.silu: fp16_dtypes,
torch.nn.functional.mish: fp16_dtypes,
torch.nn.functional.gelu: int8_dtypes,
torch.nn.functional.softmax: int8_dtypes,
torch.sum: fp16_dtypes,
}
QAT_CONV_MODULE_CLASSES = \
(torch.nn.qat.Conv2d,
torch.nn.qat.Conv3d,
torch.nn.intrinsic.qat.ConvBn2d,
torch.nn.intrinsic.qat.ConvBnReLU2d,
torch.nn.intrinsic.qat.ConvReLU2d,
torch.nn.intrinsic.qat.ConvBn3d,
torch.nn.intrinsic.qat.ConvBnReLU3d,
torch.nn.intrinsic.qat.ConvReLU3d)
##########################
# Helper Functions
##########################
def _load_weight_qparams(
self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
key = prefix + "_weight_qparams"
if key in state_dict:
self._weight_qparams = state_dict[key]
state_dict.pop(key)
def _save_weight_qparams(self, destination, prefix, keep_vars):
for attr_name in dir(self):
if "_weight_qparams" == attr_name and \
isinstance(getattr(self, attr_name), dict):
weight_qparams = getattr(self, attr_name)
destination[prefix + attr_name] = weight_qparams
def _to_reference(float_module, weight_qparams):
""" Make a weighted float module (e.g. conv and linear )a reference module by
attaching _weight_qparams that records the qparams for weight
and change the name for the module so that it's recognized
when people print the model
"""
float_module._weight_qparams = weight_qparams
float_module._register_state_dict_hook(_save_weight_qparams)
float_module._register_load_state_dict_pre_hook(_load_weight_qparams, with_module=True)
float_module_name = float_module._get_name()
def _get_name():
return float_module_name + "(Reference)"
float_module._get_name = _get_name
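# Illustrative note (not part of the original source): after calling _to_reference(mod, weight_qparams),
# printing the module shows its class name with a "(Reference)" suffix, and the attached
# _weight_qparams dict is saved and reloaded with the state_dict through the two hooks registered above.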
@register_quant_pattern(operator.add)
@register_quant_pattern(operator.sub)
@register_quant_pattern(operator.mul)
@register_quant_pattern(operator.truediv)
@register_quant_pattern(torch.add)
@register_quant_pattern(torch.sub)
@register_quant_pattern(torch.mul)
@register_quant_pattern(torch.div)
@register_quant_pattern(torch.bmm)
@register_quant_pattern((torch.nn.ReLU, operator.add))
@register_quant_pattern((torch.nn.ReLU, operator.mul))
@register_quant_pattern((torch.nn.ReLU, torch.add))
@register_quant_pattern((torch.nn.ReLU, torch.mul))
@register_quant_pattern((torch.nn.functional.relu, operator.add))
@register_quant_pattern((torch.nn.functional.relu, operator.mul))
@register_quant_pattern((torch.nn.functional.relu, torch.add))
@register_quant_pattern((torch.nn.functional.relu, torch.mul))
class BinaryOpQuantizeHandler(QuantizeHandler):
def __init__(
self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
self.relu_node = None
if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
(node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):
self.relu_node = node
node = node.args[0] # type: ignore[assignment]
self.binary_op_node = node
self.binary_op = node.target
# determine how many of the first two args are Tensors (versus scalars)
# this distinguishes things like "x + y" from "x + 2" or "2 + x"
self.num_tensor_args = 0
cache_for_no_tensor_check: Dict[Node, bool] = dict()
for arg_idx in range(len(self.binary_op_node.args)):
arg = self.binary_op_node.args[arg_idx]
if isinstance(arg, Node) and (not all_node_args_have_no_tensors(arg, modules, cache_for_no_tensor_check)):
self.num_tensor_args += 1
self.all_node_args_are_tensors = \
(self.num_tensor_args == len(self.binary_op_node.args))
qbin_op_mapping: Dict[Union[Callable, str], Callable] = {
operator.add: torch.ops.quantized.add,
torch.add: torch.ops.quantized.add,
operator.mul: torch.ops.quantized.mul,
torch.mul: torch.ops.quantized.mul,
}
qbin_relu_op_mapping: Dict[Union[Callable, str], Callable] = {
operator.add: torch.ops.quantized.add_relu,
torch.add: torch.ops.quantized.add_relu,
operator.mul: torch.ops.quantized.mul_relu,
torch.mul: torch.ops.quantized.mul_relu,
}
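# Illustrative note (not part of the original source): with these mappings, a matched
# (torch.nn.ReLU, torch.add) pattern lowers to torch.ops.quantized.add_relu, while a bare
# torch.add lowers to torch.ops.quantized.add; sub/div/bmm have no entry, so quantized_binary_op
# stays None for them and they are only handled via the fp16 path below.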
# corresponding quantized op
self.quantized_binary_op: Optional[Callable] = None
if self.binary_op in qbin_op_mapping:
self.quantized_binary_op = qbin_relu_op_mapping[self.binary_op] \
if self.relu_node is not None \
else qbin_op_mapping[self.binary_op]
def should_insert_observer_for_output(
self,
qconfig: Any,
model_is_training: bool,
) -> bool:
"""
Returns true if an observer should be inserted for the output of
the pattern matched to this QuantizeHandler instance during the
prepare step.
"""
dtypes = get_qconfig_dtypes(qconfig)
if not (self.binary_op in binary_op_supported_dtypes and dtypes in binary_op_supported_dtypes[self.binary_op]):
return False
if self.num_tensor_args == 1:
return True
elif self.all_node_args_are_tensors and self.input_output_observed():
return True
else:
return False
def is_general_tensor_value_op(self) -> bool:
return self.num_tensor_args == 1
def input_output_observed(self):
# for x + y where x and y are scalars, we do not observe anything
return self.num_tensor_args > 0
def is_output_quantized(self, qconfig, is_reference):
dtypes = get_qconfig_dtypes(qconfig)
if not is_reference:
return self.binary_op in binary_op_supported_dtypes and \
dtypes in binary_op_supported_dtypes[self.binary_op]
return True
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
if self.num_tensor_args == 0:
# example: x + y, when x and y are scalars
return quantized_graph.node_copy(
node, load_arg(quantized=None))
dtypes = get_qconfig_dtypes(qconfig)
if is_reference:
act_dtype = activation_dtype(qconfig)
dtypes = get_qconfig_dtypes(qconfig)
if act_dtype == torch.float or \
not (self.binary_op in binary_op_supported_dtypes and dtypes in binary_op_supported_dtypes[self.binary_op]):
return quantized_graph.node_copy(node, load_arg(quantized=torch.float))
else:
if self.num_tensor_args == 2:
# make sure both inputs are quantized to act_dtype
load_arg(quantized={0: act_dtype, 1: act_dtype})(self.binary_op_node.args)
args = load_arg(quantized=torch.float)(self.binary_op_node.args)
kwargs = load_arg(quantized=torch.float)(self.binary_op_node.kwargs)
op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))
def modified_load_arg(n: Node):
if n.name == self.binary_op_node.name:
return op_out
else:
return load_arg(quantized=torch.float)(n)
if self.relu_node:
op_out = quantized_graph.node_copy(self.relu_node, modified_load_arg)
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
return quantize_node(
op_out, activation_post_process,
node, modules, quantized_graph, node_name_to_scope, is_input=False)
elif not is_reference and self.binary_op in binary_op_supported_dtypes and \
dtypes in binary_op_supported_dtypes[self.binary_op]:
if dtypes in [(torch.quint8, torch.qint8, None)]:
assert self.quantized_binary_op is not None
if self.num_tensor_args == 1:
# add/mul scalar
first_arg = self.binary_op_node.args[0]
cache_for_no_tensor_check: Dict[Node, bool] = dict()
if isinstance(first_arg, Node) and (
not all_node_args_have_no_tensors(
first_arg, modules, cache_for_no_tensor_check)):
quantized_index = 0
else:
quantized_index = 1
return quantized_graph.create_node(
'call_function', self.quantized_binary_op,
load_arg(quantized=[quantized_index])(self.binary_op_node.args), self.binary_op_node.kwargs)
else:
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[operator]
scale = float(scale)
zero_point = int(zero_point)
scale_arg, zero_point_arg = \
create_qparam_nodes(
node.name, scale, zero_point, modules,
quantized_graph, node_name_to_scope)
kwargs = {**self.binary_op_node.kwargs}
add_args = (*load_arg(quantized=activation_dtype(qconfig))(self.binary_op_node.args), scale_arg, zero_point_arg)
op = quantized_graph.create_node(
'call_function', self.quantized_binary_op, add_args, kwargs)
return op
else:
assert dtypes == (torch.float16, torch.float16, None)
# TODO (refactor) this is duplicated, maybe have a helper function
if self.relu_node:
op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))
relu_args = [op_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
op_out = quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
else:
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantized_graph.create_node(
"call_method", "to", (op_out, torch.float16,), {}
)
else:
# leave the op unquantized if the dtype,reference combination is not supported
warnings.warn(
"dtype combination: {} is not "
"supported by {} for is_reference={}. "
"Supported non-reference dtype combinations are: {} "
"".format(dtypes,
self.binary_op,
is_reference,
binary_op_supported_dtypes[self.binary_op]
)
)
if self.relu_node:
op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))
relu_args = [op_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
return quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
else:
return quantized_graph.node_copy(node, load_arg(quantized=torch.float))
@register_quant_pattern(torch.cat)
class CatQuantizeHandler(QuantizeHandler):
def is_general_tensor_value_op(self) -> bool:
return True
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
if not self.all_node_args_are_tensors:
return NotImplemented
if is_reference:
act_dtype = activation_dtype(qconfig)
if act_dtype == torch.float:
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return op_out
else:
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
# make sure the first argument is quantized to act_dtype
load_arg(quantized={0: act_dtype})(node.args)
args = list(load_arg(quantized=torch.float)(node.args))
kwargs = load_arg(quantized=torch.float)(node.kwargs)
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantize_node(
op_out,
activation_post_process,
node,
modules,
quantized_graph,
node_name_to_scope,
is_input=False)
else:
return quantized_graph.node_copy(node, load_arg(quantized=torch.quint8))
# handle conv, maybe followed by relu
# NB: matching order is reversed, that is we match from the bottom of this list to the beginning
@register_quant_pattern(torch.nn.Conv1d)
@register_quant_pattern(torch.nn.Conv2d)
@register_quant_pattern(torch.nn.Conv3d)
@register_quant_pattern(torch.nn.functional.conv1d)
@register_quant_pattern(torch.nn.functional.conv2d)
@register_quant_pattern(torch.nn.functional.conv3d)
# TODO: add qat.Conv1d
@register_quant_pattern(torch.nn.qat.Conv2d)
@register_quant_pattern(torch.nn.qat.Conv3d)
@register_quant_pattern(torch.nn.intrinsic.ConvReLU1d)
@register_quant_pattern(torch.nn.intrinsic.ConvReLU2d)
@register_quant_pattern(torch.nn.intrinsic.ConvReLU3d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn1d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn2d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn3d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU1d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU2d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU3d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvReLU2d)
@register_quant_pattern(torch.nn.intrinsic.qat.ConvReLU3d)
@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv1d))
@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv2d))
@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv3d))
@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv1d))
@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv2d))
@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv3d))
# just for error checks
@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv1d))
@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv2d))
@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv3d))
@register_quant_pattern((torch.nn.functional.relu, torch.nn.Conv2d))
@register_quant_pattern((torch.nn.functional.relu, torch.nn.Conv3d))
class ConvReluQuantizeHandler(QuantizeHandler):
def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
self.relu_node = None
if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
(node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):
self.relu_node = node
node = node.args[0] # type: ignore[assignment]
self.conv_node = node
if node.op == "call_module":
self.conv = modules[str(self.conv_node.target)]
elif node.op == "call_function":
self.conv = node.target # type: ignore[assignment]
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
# Supported combinations are:
# quant_type | activation (compute_type) | weight
# static quint8 qint8
# tuple (activation_dtype, weight_dtype, compute_dtype)
supported_dtypes = [
(torch.quint8, torch.qint8, None),
]
# TODO: is_reference option for conv module
dtypes = get_qconfig_dtypes(qconfig)
# leave the op unquantized if the dtype combination is not supported
if not is_reference and dtypes not in supported_dtypes:
warnings.warn(
"dtype combination: {} is not "
"supported by Conv "
"supported dtype combinations are: {}".format(dtypes, supported_dtypes))
if self.relu_node:
conv_out = quantized_graph.node_copy(self.conv_node, load_arg(quantized=torch.float))
relu_args = [conv_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
return quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
else:
return quantized_graph.node_copy(node, load_arg(quantized=torch.float))
activation_int8_quantized = activation_is_int8_quantized(qconfig)
if self.conv_node.op == 'call_module':
# note that relu should already be fused into conv module in the fusion step
assert self.relu_node is None, 'conv module and relu fusion is not executed, ' \
'please make sure to run fusion before prepare'
output_activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert output_activation_post_process is not None
if is_reference:
# produce dequant - float_op - quant pattern
dtype = torch.float
if activation_int8_quantized:
dtype = activation_dtype(qconfig)
activation = load_arg(quantized=dtype)(self.conv_node.args[0])
args = load_arg(quantized=torch.float)(self.conv_node.args)
# Get the float conv and attach quantization scheme and quantization
# parameters of weight to the module
# and qparam is a dictionary of
# {"qscheme": ..., "scale": ..., "zero_point": ...} for per tensor quantization or
# {"qscheme": ..., "scale": ..., "zero_point": ..., "axis": ...} for per channel quantization
float_conv = self.conv
fused_conv = None
if isinstance(
float_conv,
QAT_CONV_MODULE_CLASSES):
# case 1. converting qat conv module to
# a float conv module, we need to attach
# weight fake_quant to the conv module,
# weight fake_quant is assumed to be run during
# QAT so we don't need to run it again here
float_conv = self.conv.to_float() # type: ignore[operator]
# change qat conv to conv
parent_name, name = _parent_name(self.conv_node.target)
setattr(modules[parent_name], name, float_conv)
if isinstance(float_conv, torch.nn.intrinsic._FusedModule):
fused_conv = float_conv
float_conv = float_conv[0]
weight_post_process = self.conv.weight_fake_quant
else:
# case 2. converting a conv module/fused conv module
# to float conv module, we need to attach
# weight observer to the conv module and run it
# with conv weight
if isinstance(float_conv, torch.nn.intrinsic._FusedModule):
fused_conv = float_conv
float_conv = float_conv[0] # type: ignore[index]
assert qconfig is not None
weight_post_process = qconfig.weight()
# run weight observer
weight_post_process(float_conv.weight) # type: ignore[operator]
weight_qparams = get_qparam_dict(weight_post_process)
# hardcoded for now, TODO: expose the api to user,
# we can have a map from module to reference module
# and allow user to register new ones
qconv_cls = get_static_quant_module_class(
type(float_conv), is_reference=is_reference)
ref_conv = qconv_cls.from_float(float_conv, weight_qparams) # type: ignore[attr-defined]
# if the parent is a fused conv (Sequential), we can replace the first
# item to ref conv, otherwise we can update
# the conv instance in the module tree
if fused_conv is not None:
fused_conv[0] = ref_conv
else:
parent_name, name = _parent_name(self.conv_node.target)
setattr(modules[parent_name], name, ref_conv)
op_out = quantized_graph.create_node(
'call_module',
self.conv_node.target,
args, {})
if output_activation_post_process:
op_out = quantize_node(
op_out,
output_activation_post_process,
node,
modules,
quantized_graph,
node_name_to_scope,
is_input=False)
return op_out
else:
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
additional_static_quant_mapping = convert_custom_config_dict.get("static", {})
# 1. attach activation post process to module
self.conv.activation_post_process = output_activation_post_process
# 2. select quantized class
qconv_cls = get_static_quant_module_class(
type(self.conv), additional_static_quant_mapping, is_reference=is_reference)
quantized = qconv_cls.from_float(self.conv)
parent_name, name = _parent_name(self.conv_node.target)
setattr(modules[parent_name], name, quantized)
return quantized_graph.create_node(
'call_module',
self.conv_node.target,
(load_arg(quantized=torch.quint8)(self.conv_node.args[0]),),
{})
else: # call_function
assert self.conv_node.op == "call_function"
if is_reference:
# make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively
load_arg(quantized={0: torch.quint8, 1: torch.qint8})(self.conv_node.args)
args = load_arg(quantized=torch.float)(self.conv_node.args)
kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)
op_out = quantized_graph.create_node(
"call_function", self.conv, args, kwargs)
if self.relu_node:
relu_args = [op_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
op_out = quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
if activation_int8_quantized:
root_module = modules['']
act_post_process_name = self.relu_node.name if self.relu_node else self.conv_node.name
act_post_process_node = self.relu_node if self.relu_node else self.conv_node
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
return quantize_node(
op_out,
activation_post_process,
act_post_process_node,
modules,
quantized_graph,
node_name_to_scope,
is_input=False)
else:
# output for dynamically quantized conv op is not quantized
return op_out
else:
assert len(self.conv_node.args) >= 7, \
"only conv2d calls with all arguments specified is supported right now in is_reference=False option"
# make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively
args = load_arg(quantized={0: torch.quint8, 1: torch.qint8})(self.conv_node.args)
# pack weight
weight = load_arg(quantized=torch.qint8)(self.conv_node.args[1])
other_args = load_arg(quantized=torch.float)(self.conv_node.args[2:])
bias, stride, padding, dilation, groups = other_args
if self.conv == torch.nn.functional.conv1d:
# F.conv1d can take `int` as well as `list[int]` for stride,
# padding, dilation, but the prepack op cannot. Convert
# these to lists if needed.
stride = [stride] if isinstance(stride, int) else stride
padding = [padding] if isinstance(padding, int) else padding
dilation = [dilation] if isinstance(dilation, int) else dilation
prepack_args = (weight, bias, stride, padding, dilation, groups)
prepack_op = get_qconv_prepack_op(self.conv)
packed_weight = quantized_graph.create_node(
"call_function", prepack_op, prepack_args, {})
assert activation_int8_quantized, \
"currently only static quantization is supported for conv"
# construct conv input
if activation_int8_quantized:
qconv_op = get_qconv_op(self.conv, self.relu_node is not None)
conv_input = load_arg(quantized=torch.quint8)(self.conv_node.args[0])
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
scale, zero_point, _ = get_per_tensor_qparams(activation_post_process)
scale_node, zero_point_node = \
create_qparam_nodes(
self.conv_node.name, scale, zero_point, modules,
quantized_graph, node_name_to_scope)
qconv_args = (conv_input, packed_weight, scale_node, zero_point_node)
kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)
op = quantized_graph.create_node(
'call_function', qconv_op, qconv_args, kwargs)
# Store the name of the fused op to get the path of node after fusion as well.
# TODO: may need to change the key to Node regenerate the map in each transformation,
# since we might not be able to rely on the name
node_name_to_scope[op.name] = node_name_to_scope[self.conv_node.name]
return op
else:
# conv2d_dynamic branch
raise Exception("Only static quant is supported for conv")
@register_quant_pattern(torch.nn.Linear)
@register_quant_pattern(torch.nn.functional.linear)
@register_quant_pattern(torch.nn.qat.Linear)
@register_quant_pattern(torch.nn.intrinsic.LinearReLU)
@register_quant_pattern(torch.nn.intrinsic.qat.LinearReLU)
@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.linear))
@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.linear))
# for error checks
@register_quant_pattern((torch.nn.ReLU, torch.nn.Linear))
@register_quant_pattern((torch.nn.functional.relu, torch.nn.Linear))
class LinearReLUQuantizeHandler(QuantizeHandler):
def __init__(
self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
self.relu_node = None
if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
(node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):
self.relu_node = node
node = node.args[0] # type: ignore[assignment]
self.linear_node = node
if node.op == 'call_module':
self.linear = modules[str(self.linear_node.target)]
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
# Supported combinations are:
# quant_type | activation (compute_type) | weight
# static quint8 qint8
# dynamic float32 (quint8) qint8
# weight_only float32 float16
# tuple (activation_dtype, weight_dtype, compute_dtype)
supported_dtypes = [
(torch.quint8, torch.qint8, None),
(torch.float32, torch.qint8, torch.quint8),
(torch.float32, torch.float16, None),
# static float16 quantization
(torch.float16, torch.float16, None),
]
dtypes = get_qconfig_dtypes(qconfig)
# leave the op unquantized if the dtype combination is not supported
if not is_reference and dtypes not in supported_dtypes:
warnings.warn(
"dtype combination: {} is not "
"supported by Linear "
"supported dtype combinations are: {}".format(dtypes, supported_dtypes))
if self.relu_node:
op_out = quantized_graph.node_copy(self.linear_node, load_arg(quantized=torch.float))
relu_args = [op_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
return quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
else:
return quantized_graph.node_copy(node, load_arg(quantized=None))
activation_int8_quantized = activation_is_int8_quantized(qconfig)
activation_statically_quantized = activation_is_statically_quantized(qconfig)
weight_dtype = dtypes[1]
# TODO: reference_model option for linear module
if self.linear_node.op == 'call_module':
output_activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
# note that relu should already be fused into linear module in the fusion step
assert self.relu_node is None, 'linear module and relu fusion is not executed, ' \
'please make sure to run fusion before prepare'
if is_reference:
# produce dequant - float_op - quant pattern
dtype = torch.float
if activation_int8_quantized:
dtype = activation_dtype(qconfig)
activation = load_arg(quantized=dtype)(self.linear_node.args[0])
args = load_arg(quantized=torch.float)(self.linear_node.args)
# Get the float linear and attach qscheme and qparams
# to the module
float_linear = self.linear
fused_linear = None
if isinstance(float_linear, (torch.nn.qat.Linear, torch.nn.intrinsic.qat.LinearReLU)):
float_linear = float_linear.to_float()
# change qat linear to linear
parent_name, name = _parent_name(self.linear_node.target)
setattr(modules[parent_name], name, float_linear)
# Attach weight fake quant to the linear module
if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):
fused_linear = float_linear
float_linear = float_linear[0]
weight_post_process = self.linear.weight_fake_quant
else:
if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):
fused_linear = float_linear
float_linear = self.linear[0] # type: ignore[index]
# Attach the weight observer to the module
weight_post_process = qconfig.weight() # type: ignore[union-attr]
# Run weight observer
weight_post_process(float_linear.weight) # type: ignore[operator]
weight_qparams = get_qparam_dict(weight_post_process)
# TODO: include the configuration in backend_config_dict
# we can have a map from module to reference module
# and allow user to register new ones
qlinear_cls = get_static_quant_module_class(
type(float_linear), is_reference=is_reference)
ref_linear = qlinear_cls.from_float(float_linear, weight_qparams)
# if the parent is a fused linear (Sequential), we can replace the first
# item to ref linear, otherwise we can update
# the linear instance in the module tree
if fused_linear is not None:
fused_linear[0] = ref_linear
else:
parent_name, name = _parent_name(self.linear_node.target)
setattr(modules[parent_name], name, ref_linear)
op_out = quantized_graph.create_node(
'call_module',
self.linear_node.target,
args, {})
if output_activation_post_process:
op_out = quantize_node(
op_out,
output_activation_post_process,
node,
modules,
quantized_graph,
node_name_to_scope,
is_input=False)
return op_out
else:
# 1. attach output activation post process to linear module
if output_activation_post_process:
self.linear.activation_post_process = output_activation_post_process
# 2. select corresponding quantized linear class for the float linear class
if activation_int8_quantized:
additional_static_quant_mapping = convert_custom_config_dict.get("static", {})
qlinear = get_static_quant_module_class(
type(self.linear), additional_static_quant_mapping)
else:
assert dtypes in [
(torch.float32, torch.qint8, torch.quint8),
(torch.float32, torch.float16, None),
], f"dtype {dtypes} not supported yet"
additional_dynamic_quant_mapping = convert_custom_config_dict.get("dynamic", {})
qlinear = get_dynamic_quant_module_class(type(self.linear), additional_dynamic_quant_mapping)
quantized = qlinear.from_float(self.linear)
parent_name, name = _parent_name(self.linear_node.target)
setattr(modules[parent_name], name, quantized)
# activation needs to be quantized for static quantization
dtype = torch.float
if activation_int8_quantized:
dtype = activation_dtype(qconfig)
return quantized_graph.create_node(
'call_module',
self.linear_node.target,
(load_arg(quantized=dtype)(self.linear_node.args[0]),), {})
else: # call_function
assert self.linear_node.op == 'call_function'
if is_reference:
quantized_input_dtypes = [torch.float, torch.float]
if activation_int8_quantized:
quantized_input_dtypes[0] = torch.quint8
if weight_is_statically_quantized(qconfig):
quantized_input_dtypes[1] = torch.qint8
args = load_arg(quantized=quantized_input_dtypes)(self.linear_node.args)
args = load_arg(quantized=torch.float)(self.linear_node.args)
kwargs = load_arg(quantized=torch.float)(self.linear_node.kwargs)
op_out = quantized_graph.create_node(
"call_function", torch.nn.functional.linear, args, kwargs)
if self.relu_node:
relu_args = [op_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
op_out = quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
if activation_statically_quantized:
# quantize output for statically quantized linear op
root_module = modules['']
act_post_process_name = self.relu_node.name if self.relu_node else self.linear_node.name
act_post_process_node = self.relu_node if self.relu_node else self.linear_node
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
return quantize_node(
op_out,
activation_post_process,
act_post_process_node,
modules,
quantized_graph,
node_name_to_scope,
is_input=False)
else:
# output for dynamically quantized linear op is not quantized
return op_out
else: # non-reference option
# prepacking weights for static int8 quant and dynamic quant
if dtypes != (torch.float16, torch.float16, None):
# linear args
# (x, weight, bias, ...)
# TODO: the name should be weight is int8 quantized
weight_quantized = weight_is_statically_quantized(qconfig)
dtype = weight_dtype if weight_quantized else torch.float
linear_weight = load_arg(quantized=dtype)(self.linear_node.args[1])
# get other arguments
kwargs = {**load_arg(quantized=torch.float)(self.linear_node.kwargs)}
# pack weight
bias = None
# all args after bias, including bias
other_args = load_arg(quantized=torch.float)(self.linear_node.args[2:])
if len(self.linear_node.args) > 2:
bias = load_arg(quantized=torch.float)(self.linear_node.args[2])
other_args = other_args[1:] # remove the bias argument
else:
assert 'bias' in kwargs, \
'expect bias provided as a keyword argument when it is not a positional argument'
bias = kwargs['bias']
kwargs.pop('bias')
prepack_args = (linear_weight, bias)
prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
packed_weight = quantized_graph.create_node(
'call_function', prepack_op, prepack_args, {})
# construct linear input
if activation_int8_quantized:
qlinear_op = torch.ops.quantized.linear_relu if self.relu_node else torch.ops.quantized.linear
linear_input = load_arg(quantized=torch.quint8)(self.linear_node.args[0])
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
scale, zero_point, _ = get_per_tensor_qparams(activation_post_process)
scale_node, zero_point_node = \
create_qparam_nodes(
self.linear_node.name, scale, zero_point, modules,
quantized_graph, node_name_to_scope)
qlinear_args = (linear_input, packed_weight, scale_node, zero_point_node)
op = quantized_graph.create_node(
"call_function", qlinear_op, qlinear_args, kwargs)
# Store the name of the fused op to get the path of node after fusion as well.
# TODO: may need to change the key to Node regenerate the map in each transformation,
# since we might not be able to rely on the name
node_name_to_scope[op.name] = node_name_to_scope[self.linear_node.name]
return op
elif dtypes in [(torch.float32, torch.qint8, torch.quint8),
(torch.float32, torch.float16, None)]:
# choose linear dynamic or linear dynamic fp16 op based on weight dtype
if weight_dtype == torch.qint8:
if self.relu_node:
qlinear_op = torch.ops.quantized.linear_relu_dynamic
else:
qlinear_op = torch.ops.quantized.linear_dynamic
else:
if self.relu_node:
qlinear_op = torch.ops.quantized.linear_relu_dynamic_fp16
else:
qlinear_op = torch.ops.quantized.linear_dynamic_fp16
linear_input = load_arg(quantized=torch.float)(self.linear_node.args[0])
qlinear_args = (linear_input, packed_weight) # type: ignore[assignment]
op_out = quantized_graph.create_node(
"call_function", qlinear_op, qlinear_args, kwargs)
# Store the name of the dynamic op to get the path of node after replacement as well.
# TODO: may need to change the key to Node regenerate the map in each transformation,
# since we might not be able to rely on the name
node_name_to_scope[op_out.name] = node_name_to_scope[self.linear_node.name]
return op_out
else:
assert dtypes == (torch.float16, torch.float16, None)
# TODO (refactor) this is duplicated, maybe have a helper function
if self.relu_node:
op_out = quantized_graph.node_copy(self.linear_node, load_arg(quantized=torch.float))
relu_args = [op_out]
relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))
relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)
op_out = quantized_graph.create_node(
"call_function", torch.nn.functional.relu, tuple(relu_args), relu_kwargs)
else:
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantized_graph.create_node(
"call_method", "to", (op_out, torch.float16), {})
@register_quant_pattern(torch.nn.BatchNorm2d)
@register_quant_pattern(torch.nn.BatchNorm3d)
@register_quant_pattern(torch.nn.intrinsic.BNReLU2d)
@register_quant_pattern(torch.nn.intrinsic.BNReLU3d)
class BatchNormQuantizeHandler(QuantizeHandler):
def __init__(
self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
assert node.op == 'call_module'
self.bn_node = node
self.bn = modules[str(self.bn_node.target)]
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
additional_static_quant_mapping = convert_custom_config_dict.get("static", {})
# 1. attach activation post process to module
output_activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert output_activation_post_process is not None
if is_reference:
# produce dequant - float_op - quant pattern
dtype = activation_dtype(qconfig)
activation = load_arg(quantized=dtype)(self.bn_node.args[0])
args = load_arg(quantized=torch.float)(self.bn_node.args)
op_out = quantized_graph.create_node(
"call_module",
self.bn_node.target,
args,
{})
if output_activation_post_process:
op_out = quantize_node(
op_out,
output_activation_post_process,
node,
modules,
quantized_graph,
node_name_to_scope,
is_input=False)
return op_out
else:
self.bn.activation_post_process = output_activation_post_process
qbn_cls = get_static_quant_module_class(type(self.bn), additional_static_quant_mapping)
quantized = qbn_cls.from_float(self.bn)
parent_name, name = _parent_name(self.bn_node.target)
setattr(modules[parent_name], name, quantized)
return quantized_graph.create_node(
'call_module',
self.bn_node.target,
load_arg(quantized=[0])(self.bn_node.args),
load_arg(quantized=torch.float)(self.bn_node.kwargs))
@register_quant_pattern(torch.nn.Embedding)
@register_quant_pattern(torch.nn.EmbeddingBag)
class EmbeddingQuantizeHandler(QuantizeHandler):
def __init__(
self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
def input_output_observed(self) -> bool:
return False
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
# Supported combinations are:
# quant_type | activation | weight | activation_compute_type
# weight_only | float32 | quint8 | None
# weight_only | float32 | quint4x2 | None
# tuple (activation_dtype, weight_dtype, compute_dtype)
supported_dtypes = [
(torch.float32, torch.quint8, None),
(torch.float32, torch.quint4x2, None),
]
assert node.op == 'call_module'
emb_node = node
dtypes = get_qconfig_dtypes(qconfig)
# leave the op unquantized if the dtype combination is not supported
if dtypes not in supported_dtypes:
warnings.warn(
"dtype combination: {} is not "
"supported by Embedding/EmbeddingBag, "
"supported dtype combinations are: {}".format(dtypes, supported_dtypes))
return quantized_graph.node_copy(node, load_arg(quantized=None))
emb = modules[str(emb_node.target)]
qemb = get_static_quant_module_class(type(emb))
quantized = qemb.from_float(emb)
parent_name, name = _parent_name(emb_node.target)
setattr(modules[parent_name], name, quantized)
return quantized_graph.create_node(
'call_module',
emb_node.target,
load_arg(quantized=torch.float)(emb_node.args),
load_arg(quantized=torch.float)(emb_node.kwargs))
# TODO (maybe): merge with embedding quantize handler
@register_quant_pattern(torch.nn.GRUCell)
@register_quant_pattern(torch.nn.LSTMCell)
@register_quant_pattern(torch.nn.RNNCell)
@register_quant_pattern(torch.nn.LSTM)
class RNNDynamicQuantizeHandler(QuantizeHandler):
def __init__(
self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
def input_output_observed(self) -> bool:
return False
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
# Supported combinations are:
# quant_type | activation | weight | activation_compute_type
# dynamic | float32 | qint8 | quint8
# dynamic | float32 | float16 | None
# tuple (activation_dtype, weight_dtype, compute_dtype)
supported_dtypes = [
(torch.float32, torch.qint8, torch.quint8),
(torch.float32, torch.float16, None),
]
assert node.op == 'call_module'
dtypes = get_qconfig_dtypes(qconfig)
# leave the op unquantized if the dtype combination is not supported
if dtypes not in supported_dtypes:
warnings.warn(
"dtype combination: {} is not "
"supported by Embedding/EmbeddingBag, "
"supported dtype combinations are: {}".format(dtypes, supported_dtypes))
return quantized_graph.node_copy(node, load_arg(quantized=None))
module = modules[str(node.target)]
qmodule_cls = get_dynamic_quant_module_class(type(module))
qmodule = qmodule_cls.from_float(module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, qmodule)
return quantized_graph.create_node(
'call_module',
node.target,
load_arg(quantized=torch.float)(node.args),
load_arg(quantized=torch.float)(node.kwargs))
ARGS_TO_SKIP = {
torch._ops.ops.quantized.hardswish: ['inplace'],
torch._ops.ops.quantized.elu: ['inplace'],
torch._ops.ops.quantized.instance_norm:
['running_mean', 'running_var', 'use_input_stats', 'momentum'],
}
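# Illustrative note (not part of the original source): ARGS_TO_SKIP lists keyword arguments that the
# quantized kernels do not accept; e.g. an inplace=True kwarg passed to F.hardswish is dropped before
# the call_function node for torch.ops.quantized.hardswish is created (see the kwargs filtering in
# DefaultNodeQuantizeHandler.convert below).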
@register_quant_pattern(torch.nn.ConvTranspose1d)
@register_quant_pattern(torch.nn.ConvTranspose2d)
@register_quant_pattern(torch.nn.ELU)
@register_quant_pattern(torch.nn.LeakyReLU)
@register_quant_pattern(torch.nn.Hardswish)
@register_quant_pattern(torch.nn.InstanceNorm1d)
@register_quant_pattern(torch.nn.InstanceNorm2d)
@register_quant_pattern(torch.nn.InstanceNorm3d)
@register_quant_pattern(torch.nn.LayerNorm)
@register_quant_pattern(torch.nn.SiLU)
@register_quant_pattern(torch.nn.Mish)
# we currently only support reference patterns for these ops so they have been removed
# until they receive a proper fp16 kernel. To use the reference pattern, use a custom qconfig
# @register_quant_pattern(torch.nn.GELU)
# @register_quant_pattern(torch.nn.Softmax)
@register_quant_pattern(torch.nn.functional.elu)
@register_quant_pattern(torch.nn.functional.hardswish)
@register_quant_pattern(torch.nn.functional.instance_norm)
@register_quant_pattern(torch.nn.functional.layer_norm)
@register_quant_pattern(torch.nn.functional.leaky_relu)
@register_quant_pattern(torch.nn.functional.silu)
@register_quant_pattern(torch.nn.functional.mish)
# we currently only support reference patterns for these ops so they have been removed
# until they receive a proper fp16 kernel. To use the reference pattern, use a custom qconfig
# @register_quant_pattern(torch.nn.functional.gelu)
# @register_quant_pattern(torch.nn.functional.softmax)
@register_quant_pattern(torch.sum)
class DefaultNodeQuantizeHandler(QuantizeHandler):
""" Common quantized op, first input and first output will be quantized
"""
def __init__(
self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
if node.op == "call_function" or node.op == "call_method":
self.op = node.target
elif node.op == "call_module":
self.op = type(modules[str(node.target)])
def is_output_quantized(self, qconfig, is_reference):
dtypes = get_qconfig_dtypes(qconfig)
if not is_reference:
return self.op in default_op_supported_dtypes and \
dtypes in default_op_supported_dtypes[self.op]
return True
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
if not self.all_node_args_are_tensors:
return NotImplemented
assert node.op in ['call_module', 'call_function'], 'Only call_module and ' + \
'call_function are handled in DefaultNode'
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
additional_static_quant_mapping = convert_custom_config_dict.get("static", {})
dtypes = get_qconfig_dtypes(qconfig)
if not is_reference and dtypes not in default_op_supported_dtypes[self.op]:
warnings.warn(
"dtype combination: {} is not "
"supported by {} "
"supported dtype combinations are: {}".format(dtypes, self.op, default_op_supported_dtypes[self.op]))
return quantized_graph.node_copy(node, load_arg(quantized=torch.float))
# TODO: make helper functions for (torch.quint8, torch.qint8, None)
if not is_reference:
if dtypes in [(torch.quint8, torch.qint8, None)]:
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
if node.op == 'call_module':
module = modules[str(node.target)]
module.activation_post_process = activation_post_process
quantized_module_cls = get_static_quant_module_class(
type(module), additional_static_quant_mapping)
quantized_module = quantized_module_cls.from_float(module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, quantized_module)
return quantized_graph.create_node(
'call_module',
node.target,
load_arg(quantized=[0])(node.args),
load_arg(quantized=torch.float)(node.kwargs))
else:
assert node.op == "call_function"
# call_function
scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[operator]
scale = float(scale)
zero_point = int(zero_point)
scale_arg, zero_point_arg = \
create_qparam_nodes(
node.name, scale, zero_point, modules,
quantized_graph, node_name_to_scope)
assert not isinstance(node.target, str), "Expecting node.target for call_function to be a function instead of a string"
quantized_op = get_quantized_operator(node.target)
args = load_arg(quantized=[0])(node.args)
kwargs = {**load_arg(quantized=torch.float)(node.kwargs), "output_scale": scale_arg,
"output_zero_point": zero_point_arg}
if quantized_op in ARGS_TO_SKIP:
args_to_skip = ARGS_TO_SKIP[quantized_op]
for arg in args_to_skip:
if arg in kwargs:
kwargs.pop(arg)
return quantized_graph.create_node(
"call_function", quantized_op, args, kwargs) # type: ignore[arg-type]
else:
assert dtypes in [(torch.float16, torch.float16, None)]
# fp16 kernels generally don't exist for these ops, so compute in fp32 and cast the output to fp16
warnings.warn(
"Only reference patterns are currently supported for {dtype} dtype with {op} op"
"".format(dtype=dtypes, op=self.op))
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantized_graph.create_node(
"call_method", "to", (op_out, torch.float16), {})
else:
assert is_reference
# We can produce reference patterns for dtypes including
# (torch.quint8, torch.qint8, torch.qint32, torch.float16)
act_dtype = activation_dtype(qconfig)
if act_dtype == torch.float:
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return op_out
else:
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
# make sure the input is quantized to act_dtype
load_arg(quantized={0: act_dtype})(node.args)
args = load_arg(quantized=torch.float)(node.args)
kwargs = load_arg(quantized=torch.float)(node.kwargs)
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantize_node(
op_out, activation_post_process,
node, modules, quantized_graph, node_name_to_scope, is_input=False)
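# Illustrative sketch (added commentary, not part of the original file): for a "default node"
# such as torch.nn.functional.hardswish with a static (torch.quint8, torch.qint8, None) qconfig,
# the handler above swaps in the quantized operator and threads the output qparams through as
# extra kwargs, roughly:
#
#   scale, zero_point = activation_post_process.calculate_qparams()
#   out = torch.ops.quantized.hardswish(x_q, output_scale=scale, output_zero_point=zero_point)
#
# The exact quantized-op name and kwarg handling come from get_quantized_operator and may differ
# per op; this is only a hedged example of the shape of the rewrite.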
@register_quant_pattern(torch.nn.Hardsigmoid, default_affine_fixed_qparams_fake_quant)
@register_quant_pattern(torch.nn.functional.hardsigmoid, default_affine_fixed_qparams_fake_quant)
@register_quant_pattern('hardsigmoid', default_affine_fixed_qparams_fake_quant)
@register_quant_pattern('hardsigmoid_', default_affine_fixed_qparams_fake_quant)
@register_quant_pattern(torch.nn.Sigmoid, default_affine_fixed_qparams_fake_quant)
@register_quant_pattern(torch.sigmoid, default_affine_fixed_qparams_fake_quant)
@register_quant_pattern('sigmoid', default_affine_fixed_qparams_fake_quant)
@register_quant_pattern('sigmoid_', default_affine_fixed_qparams_fake_quant)
@register_quant_pattern(torch.nn.Tanh, default_symmetric_fixed_qparams_fake_quant)
@register_quant_pattern(torch.tanh, default_symmetric_fixed_qparams_fake_quant)
@register_quant_pattern('tanh', default_symmetric_fixed_qparams_fake_quant)
@register_quant_pattern('tanh_', default_symmetric_fixed_qparams_fake_quant)
class FixedQParamsOpQuantizeHandler(QuantizeHandler):
def __init__(self,
node: Node,
modules: Dict[str, torch.nn.Module]):
super().__init__(node, modules)
self.node = node
def should_mark_output_quantized_from_input_quantized_status(
self,
qconfig: QConfigAny
) -> bool:
# FixQParamOps are the same as CopyNode in int8 quantization
return activation_dtype(qconfig) in [torch.quint8, torch.qint8]
# some qhandlers override the activations constructor
def get_activation_ctr(self, qconfig, pattern) -> Optional[Callable]:
if activation_dtype(qconfig) == torch.float16:
return qconfig.activation
else:
return get_default_output_activation_post_process_map().get(
pattern, None)
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
if not is_reference:
dtypes = get_qconfig_dtypes(qconfig)
if dtypes == (torch.float16, torch.float16, None):
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantized_graph.create_node(
"call_method", "to", (op_out, torch.float16,), {}
)
else:
return quantized_graph.node_copy(node, load_arg(quantized=None))
else:
act_dtype = activation_dtype(qconfig)
if act_dtype == torch.float:
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return op_out
else:
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
# make sure the input is quantized to act_dtype
load_arg(quantized={0: act_dtype})(node.args)
args = load_arg(quantized=torch.float)(node.args)
kwargs = load_arg(quantized=torch.float)(node.kwargs)
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantize_node(
op_out, activation_post_process,
node, modules, quantized_graph, node_name_to_scope, is_input=False)
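# Illustrative note (added commentary, not part of the original file): "fixed qparams" ops have a
# known output range, so their observers are replaced by fixed fake-quant modules instead of being
# learned from data. As a rough sketch of the defaults registered above:
#
#   sigmoid / hardsigmoid -> output in [0, 1]  -> affine qparams, approximately scale=1/256, zero_point=0
#   tanh                  -> output in [-1, 1] -> symmetric qparams, approximately scale=2/256, zero_point=128
#
# The exact constants live in default_affine_fixed_qparams_fake_quant /
# default_symmetric_fixed_qparams_fake_quant and may differ between PyTorch versions.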
@register_quant_pattern(torch.nn.AdaptiveAvgPool1d)
@register_quant_pattern(torch.nn.AdaptiveAvgPool2d)
@register_quant_pattern(torch.nn.AdaptiveAvgPool3d)
@register_quant_pattern(torch.nn.AvgPool1d)
@register_quant_pattern(torch.nn.AvgPool2d)
@register_quant_pattern(torch.nn.AvgPool3d)
@register_quant_pattern(torch.nn.Dropout)
@register_quant_pattern(torch.nn.Hardtanh)
@register_quant_pattern(torch.nn.MaxPool1d)
@register_quant_pattern(torch.nn.MaxPool2d)
@register_quant_pattern(torch.nn.MaxPool3d)
@register_quant_pattern(torch.nn.ReLU)
@register_quant_pattern(torch.nn.ReLU6)
@register_quant_pattern(torch.adaptive_avg_pool1d)
@register_quant_pattern(torch.nn.functional.adaptive_avg_pool2d)
@register_quant_pattern(torch.nn.functional.adaptive_avg_pool3d)
@register_quant_pattern(torch.nn.functional.dropout)
@register_quant_pattern(torch.nn.functional.hardtanh)
@register_quant_pattern(torch.nn.functional.hardtanh_)
@register_quant_pattern(torch.nn.functional.interpolate)
@register_quant_pattern(torch.nn.functional.max_pool1d)
@register_quant_pattern(torch.nn.functional.max_pool2d)
@register_quant_pattern(torch.nn.functional.max_pool3d)
@register_quant_pattern(torch.nn.functional.relu)
@register_quant_pattern(torch.nn.functional.relu6)
@register_quant_pattern(torch.avg_pool1d)
@register_quant_pattern(torch._C._nn.avg_pool2d)
@register_quant_pattern(torch._C._nn.avg_pool3d)
@register_quant_pattern(torch.clamp)
@register_quant_pattern(torch.flatten)
@register_quant_pattern(torch.max)
@register_quant_pattern(torch.mean)
@register_quant_pattern(torch.min)
@register_quant_pattern(operator.floordiv)
@register_quant_pattern('clamp')
@register_quant_pattern('mean')
@register_quant_pattern('relu')
@register_quant_pattern('relu_')
class CopyNodeQuantizeHandler(QuantizeHandler):
""" Operators that works on both float and quantized input
if input is quantized, the output Tensor shares
the same quantization parameter with input.
These ops will do computation on the input Tensor, e.g. average pool, so we will
insert extra observer/fake_quant for the output of these operators.
TODO: maybe rename this to TensorValueOpQuantizeHandler
"""
def should_mark_output_quantized_from_input_quantized_status(
self,
qconfig: QConfigAny
) -> bool:
return True
def is_general_tensor_value_op(self) -> bool:
return True
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
# always produce reference pattern for relu
is_relu = node.op == "call_function" and node.target == torch.nn.functional.relu
if is_reference or is_relu:
# when activation dtype is torch.float, the node does not require
# observation
# e.g. dynamic quantization or weight_only quantization
act_dtype = activation_dtype(qconfig)
if act_dtype == torch.float:
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return op_out
else:
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
# make sure the input is quantized to act_dtype
load_arg(quantized={0: act_dtype})(node.args)
args = list(load_arg(quantized=torch.float)(node.args))
kwargs = load_arg(quantized=torch.float)(node.kwargs)
op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))
return quantize_node(
op_out,
activation_post_process,
node, modules, quantized_graph, node_name_to_scope, is_input=False)
else:
return quantized_graph.node_copy(node, load_arg(quantized=None))
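# Illustrative sketch (added commentary, not part of the original file): a "copy node" such as max
# pooling reuses the input's quantization parameters rather than computing new ones, e.g.:
#
#   x_q = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
#   y_q = torch.nn.functional.max_pool2d(x_q, 2)
#   # y_q.q_scale() == x_q.q_scale() and y_q.q_zero_point() == x_q.q_zero_point()
#
# This is only a hedged eager-mode illustration of the property the handler relies on; the FX
# convert path expresses the same idea through the observer-sharing logic above.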
class CustomModuleQuantizeHandler(QuantizeHandler):
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
""" Convert a float custom module to quantized custom module
"""
assert node.op == 'call_module'
assert convert_custom_config_dict is not None
custom_module_class_mapping = convert_custom_config_dict.get("observed_to_quantized_custom_module_class", None)
assert custom_module_class_mapping is not None
observed_custom_module = modules[str(node.target)]
if activation_is_statically_quantized(qconfig):
activation_post_process = \
self._maybe_get_last_node_only_observer(modules)
assert activation_post_process is not None
observed_custom_module.activation_post_process = activation_post_process
quantized_custom_module_class = get_swapped_custom_module_class(
observed_custom_module, custom_module_class_mapping, qconfig)
quantized_custom_module = \
quantized_custom_module_class.from_observed(observed_custom_module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, quantized_custom_module)
# hardcode the quantized input to be None (take whatever is in the environment);
# we can extend this
# if there is a need, e.g. get the indexes of quantized inputs from some
# module attribute like module._QUANTIZED_INPUT_INDEXES
return quantized_graph.node_copy(node, load_arg(quantized=None))
@register_quant_pattern(torch.nn.Identity)
@register_quant_pattern(torch.chunk)
@register_quant_pattern(torch.transpose)
@register_quant_pattern(torch.repeat_interleave)
@register_quant_pattern(torch.sort)
@register_quant_pattern(torch.squeeze)
@register_quant_pattern(torch.stack)
@register_quant_pattern(torch.unsqueeze)
@register_quant_pattern(operator.getitem)
@register_quant_pattern('chunk')
@register_quant_pattern('contiguous')
@register_quant_pattern('detach')
@register_quant_pattern('detach_')
@register_quant_pattern('numel')
@register_quant_pattern('permute')
@register_quant_pattern('repeat')
@register_quant_pattern('repeat_interleave')
@register_quant_pattern('reshape')
@register_quant_pattern('resize_')
@register_quant_pattern('shape')
@register_quant_pattern('size')
@register_quant_pattern('squeeze')
@register_quant_pattern('squeeze_')
@register_quant_pattern('transpose')
@register_quant_pattern('unsqueeze')
@register_quant_pattern('unsqueeze_')
@register_quant_pattern('view')
class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):
""" Operators that works on both float and quantized input
if input is quantized, the output Tensor shares
the same quantization parameter with input.
These ops only do rearrangement of Tensor values, for
example reshape, or just query the information about Tensor
e.g. size, and we do not insert extra observer/fake_quant
for the output of the operator.
"""
def is_general_tensor_shape_op(self) -> bool:
return True
def should_mark_output_quantized_from_input_quantized_status(
self,
qconfig: QConfigAny
) -> bool:
return True
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
return quantized_graph.node_copy(node, load_arg(quantized=None))
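# Illustrative sketch (added commentary, not part of the original file): shape ops only rearrange
# or inspect values, so no new observer is needed and quantization parameters carry over, e.g.:
#
#   x_q = torch.quantize_per_tensor(torch.randn(2, 4), scale=0.1, zero_point=0, dtype=torch.quint8)
#   y_q = x_q.reshape(4, 2)      # same underlying int values, same scale/zero_point
#   n = x_q.size(0)              # pure metadata query, nothing to observe
#
# Hedged example only; the handler above simply copies the node without inserting observers.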
class StandaloneModuleQuantizeHandler(QuantizeHandler):
""" Converts an observed standalone module to quantized standalone module
by calling convert_fx on the observed standalone module.
"""
def convert(self,
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
load_arg: Callable,
is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None) -> Node:
assert node.op == 'call_module'
convert = torch.quantization.quantize_fx._convert_standalone_module_fx # type: ignore[attr-defined]
# We know that observed standalone module is a GraphModule since
# it's produced by us
observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment]
input_quantized_idxs = observed_standalone_module._standalone_module_input_quantized_idxs.tolist() # type: ignore[operator]
quantized_standalone_module = convert(observed_standalone_module, is_reference=is_reference)
parent_name, name = _parent_name(node.target)
# update the modules dict
setattr(modules[parent_name], name, quantized_standalone_module)
modules[str(node.target)] = quantized_standalone_module
return quantized_graph.node_copy(node, load_arg(quantized=input_quantized_idxs))
| 48.60615
| 132
| 0.623289
|
a6c381c609a0f46ed1db7b960599e39daf2604ab
| 1,436
|
py
|
Python
|
venv/Lib/site-packages/tests/test_048_FetchTupleBinaryData_02.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/tests/test_048_FetchTupleBinaryData_02.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/tests/test_048_FetchTupleBinaryData_02.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_048_FetchTupleBinaryData_02(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_048)
def run_test_048(self):
conn = ibm_db.connect(config.database, config.user, config.password)
if (not conn):
print("Could not make a connection.")
return 0
server = ibm_db.server_info( conn )
fp = open("tests/spook_out.png", "wb")
if (server.DBMS_NAME[0:3] == 'IDS'):
result = ibm_db.exec_immediate(conn, "SELECT picture FROM animal_pics WHERE name = 'Spook'")
else:
result = ibm_db.exec_immediate(conn, "SELECT picture, LENGTH(picture) FROM animal_pics WHERE name = 'Spook'")
if (not result):
print("Could not execute SELECT statement.")
return 0
row = ibm_db.fetch_tuple(result)
if row:
fp.write(row[0])
else:
print(ibm_db.stmt_errormsg())
fp.close()
cmp = (open('tests/spook_out.png', "rb").read() == open('tests/spook.png', "rb").read())
print("Are the files the same:", cmp)
#__END__
#__LUW_EXPECTED__
#Are the files the same: True
#__ZOS_EXPECTED__
#Are the files the same: True
#__SYSTEMI_EXPECTED__
#Are the files the same: True
#__IDS_EXPECTED__
#Are the files the same: True
| 28.156863
| 117
| 0.688022
|
b7453292ffc5d462d75a472e25d11e8f08708731
| 2,785
|
py
|
Python
|
backend/exc.py
|
al-indigo/vmemperor
|
80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1
|
[
"Apache-2.0"
] | null | null | null |
backend/exc.py
|
al-indigo/vmemperor
|
80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1
|
[
"Apache-2.0"
] | 8
|
2017-10-11T13:26:10.000Z
|
2021-12-13T20:27:52.000Z
|
backend/exc.py
|
ispras/vmemperor
|
80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1
|
[
"Apache-2.0"
] | 4
|
2017-07-27T12:25:42.000Z
|
2018-01-28T02:06:26.000Z
|
import json
from rethinkdb import RethinkDB
r = RethinkDB()
import tornado.options as opts
__all__ = ['EmperorException',
'XenAdapterException',
'XenAdapterAPIError', 'XenAdapterArgumentError', 'XenAdapterConnectionError',
'AuthenticationException', 'AuthenticationRealmException', 'AuthenticationUserNotFoundException', 'AuthenticationWithEmptyPasswordException']
class EmperorException(Exception):
def __init__(self, log, message):
log.error(f"{type(self).__name__}: {message}", exc_info=True)
super().__init__()
self.message = message
self.log = log
def __str__(self):
return f"<{self.__class__.__name__}>: {self.message}"
class XenAdapterException(EmperorException):
pass
class XenAdapterConnectionError(XenAdapterException):
pass
class XenAdapterAPIError(XenAdapterException):
def __init__(self, log, message, details=None):
self.details = self.print_details(details)
super().__init__(log, message=json.dumps({'message' : message, 'details' : self.details}))
@staticmethod
def print_details(details):
if not details:
return None
if details[0] == 'VDI_MISSING':
return {
"error_code" : details[0],
"SR": details[1],
"VDI": details[2],
}
elif details[0] == 'UUID_INVALID':
return {
"error_code": details[0],
"object_type": details[1],
"uuid": details[2],
}
elif details[0] == 'HANDLE_INVALID':
return {
"error_code": details[0],
"object_type": details[1],
"ref": details[2]
}
else:
return details
class XenAdapterArgumentError(XenAdapterException):
pass
class AuthenticationException(EmperorException):
pass
class AuthenticationRealmException(AuthenticationException):
pass
class AuthenticationUserNotFoundException(AuthenticationException):
def __init__(self, log, realm):
super().__init__(log, f"Realm {type(realm).__name__} can't find user {realm.username}")
class AuthenticationPasswordException(AuthenticationException):
def __init__(self, log, realm):
super().__init__(log,
f"Realm {type(realm).__name__} can't find authenticate user {realm.get_id()}: incorrect password")
class AuthenticationWithEmptyPasswordException(AuthenticationException):
def __init__(self, log, realm):
super().__init__(log,
f"Realm {type(realm).__name__} can't find authenticate user {realm.username}: empty password")
class UnauthorizedException(AuthenticationException):
pass
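# Illustrative usage sketch (added commentary, not part of the original module): the structured
# `details` list mirrors XenAPI error descriptors, so a caller might raise, for example:
#
#   raise XenAdapterAPIError(log, "Failed to attach disk", ["VDI_MISSING", sr_ref, vdi_ref])
#
# which logs the error and serialises {"message": ..., "details": {"error_code": "VDI_MISSING",
# "SR": sr_ref, "VDI": vdi_ref}} into the exception message. `log`, `sr_ref` and `vdi_ref` are
# hypothetical names used only for this sketch.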
| 31.647727
| 153
| 0.64237
|
2dc9a99e818ef96a36d30df790bace2c18172635
| 4,296
|
py
|
Python
|
models.py
|
jbmyre/ip
|
99178b3e25b91cdabdbb1e453ad753dd775f773a
|
[
"MIT"
] | 1
|
2016-11-02T20:32:34.000Z
|
2016-11-02T20:32:34.000Z
|
models.py
|
jbmyre/ip_manager
|
99178b3e25b91cdabdbb1e453ad753dd775f773a
|
[
"MIT"
] | null | null | null |
models.py
|
jbmyre/ip_manager
|
99178b3e25b91cdabdbb1e453ad753dd775f773a
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.db.models.signals import post_save
class Subnet(models.Model):
STATIC = 'Static'
DHCP = 'DHCP'
name = models.CharField(
verbose_name="Subnet Name",
max_length=100
)
vlan = models.IntegerField(
verbose_name='Vlan Id'
)
first_host = models.GenericIPAddressField(
protocol='IPv4'
)
last_host = models.GenericIPAddressField(
protocol='IPv4'
)
cidr = models.CharField(
verbose_name="CIDR Scope",
max_length=3,
default='/24',
help_text='The CIDR notation for the subnet (ie "/24"). defaults to /24'
)
netmask = models.GenericIPAddressField(
protocol='IPv4',
default="255.255.255.0"
)
broadcast = models.GenericIPAddressField(
protocol='IPv4'
)
dns_1 = models.GenericIPAddressField(
verbose_name="Primary DNS Server",
blank=True,
null=True,
protocol='IPv4'
)
dns_2 = models.GenericIPAddressField(
verbose_name="Secondary DNS Server",
blank=True,
null=True,
protocol='IPv4'
)
gateway = models.GenericIPAddressField(
verbose_name="Gateway",
protocol='IPv4'
)
last_sweep = models.DateTimeField(
blank=True,
null=True,
)
SUBNET_TYPE_CHOICES = (
(STATIC, 'Static'),
(DHCP, 'DHCP'),
)
type = models.CharField(
verbose_name="Subnet Type",
max_length=20,
choices=SUBNET_TYPE_CHOICES,
default=STATIC,
)
dhcp_start = models.GenericIPAddressField(
blank=True,
null=True,
protocol='IPv4',
)
dhcp_end = models.GenericIPAddressField(
blank=True,
null=True,
protocol='IPv4',
)
def __str__(self):
return "%s" % self.name
class Meta:
verbose_name = ('Subnet')
verbose_name_plural = ('Subnets')
class Host(models.Model):
STATIC = 'Static'
DHCP_RESERVATION = 'DHCP_RESERVATION'
DHCP = 'DHCP'
SUCCESS = 'Success'
UNDETERMINED = 'Undetermined'
FAIL = 'Fail'
address = models.GenericIPAddressField(protocol='IPv4',unique=True)
subnet = models.ForeignKey(
Subnet,
on_delete=models.CASCADE,
)
machine_name = models.CharField(
blank=True,
null=True,
max_length=100
)
building = models.CharField(
blank=True,
null=True,
max_length=100
)
location = models.CharField(
blank=True,
null=True,
max_length=100
)
machine_dec = models.TextField(
verbose_name="Machine Description",
blank=True,
null=True,
)
ADDRESS_TYPE_CHOICES = (
(STATIC, 'Static'),
(DHCP_RESERVATION, 'DHCP Reservation'),
(DHCP, 'DHCP'),
)
address_type = models.CharField(
max_length=20,
choices=ADDRESS_TYPE_CHOICES,
default=STATIC,
)
eth_port = models.IntegerField(
blank=True,
null=True,
verbose_name='Ethernet Port',
default=0,
)
notes = models.TextField(
blank=True,
null=True,
)
PING_STATUS_CHOICES = (
(SUCCESS, 'Success'),
(UNDETERMINED, 'Undetermined'),
(FAIL, 'Failed'),
)
ping_status = models.CharField(
max_length=20,
choices=PING_STATUS_CHOICES,
default=UNDETERMINED,
)
last_ping = models.DateTimeField(
blank=True,
null=True
)
def __str__(self):
return "%s" % self.address
class Meta:
ordering = ('id',)
verbose_name = 'Host'
verbose_name_plural = 'Hosts'
class PingHistory(models.Model):
SUCCESS = 'Success'
UNDETERMINED = 'Undetermined'
FAIL = 'Fail'
host = models.ForeignKey(Host, on_delete=models.CASCADE)
PING_STATUS_CHOICES = (
(SUCCESS, 'Success'),
(UNDETERMINED, 'Undetermined'),
(FAIL, 'Failed'),
)
ping_status = models.CharField(
max_length=20,
choices=PING_STATUS_CHOICES,
default=UNDETERMINED,
)
ping_date = models.DateTimeField(
blank=True,
null=True
)
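# Illustrative usage sketch (added commentary, not part of the original module), assuming the
# standard Django ORM is available:
#
#   subnet = Subnet.objects.create(
#       name="Office LAN", vlan=10,
#       first_host="10.0.10.1", last_host="10.0.10.254",
#       broadcast="10.0.10.255", gateway="10.0.10.1",
#   )
#   Host.objects.create(address="10.0.10.20", subnet=subnet, machine_name="printer-01")
#
# Field names come from the models above; the concrete values are made up for illustration.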
| 22.259067
| 80
| 0.583333
|
68e5866b35ae2616b3079c3221dd469bcee91f16
| 12,020
|
py
|
Python
|
tests/_server_test.py
|
tods-doc/axolotl
|
6fc87bedb514677db09c039d492d1d3c7864913d
|
[
"Apache-2.0"
] | null | null | null |
tests/_server_test.py
|
tods-doc/axolotl
|
6fc87bedb514677db09c039d492d1d3c7864913d
|
[
"Apache-2.0"
] | null | null | null |
tests/_server_test.py
|
tods-doc/axolotl
|
6fc87bedb514677db09c039d492d1d3c7864913d
|
[
"Apache-2.0"
] | null | null | null |
# from __future__ import print_function
import argparse
import os
import pathlib
from pprint import pprint
import grpc
from d3m import utils as d3m_utils, runtime as runtime_module
from d3m.metadata import problem as problem_module
from ta3ta2_api import core_pb2, core_pb2_grpc, value_pb2, utils
from axolotl.utils import pipeline as pipeline_utils
from axolotl.d3m_grpc import constants
# with d3m_utils.silence():
# d3m_index.load_all(blocklist=constants.PrimitivesList.BLACK_LIST)
# primitives = [
# 'd3m.primitives.datasets.DatasetToDataFrame',
# 'd3m.primitives.data_transformation.denormalize.Common'
# ]
#
# with d3m_utils.silence():
# for primitive in primitives:
# d3m_index.get_primitive(primitive)
LENGTH = 60
ALLOWED_VALUE_TYPES = ['DATASET_URI', 'CSV_URI', 'RAW']
FULL_SPECIFIED_PIPELINE_PATH = 'modules/server/test_full_pipeline.json'
PRE_SPECIFIED_PIPELINE_PATH = 'modules/server/test_placeholder.json'
# PRE_SPECIFIED_PIPELINE_PATH = 'modules/server/test_placeholder_pipeline.json'
def hello_request():
request = core_pb2.HelloRequest()
return request
def list_primitives_request():
request = core_pb2.ListPrimitivesRequest()
return request
def search_solutions_request(test_paths, specified_template=None):
user_agent = "test_agent"
version = core_pb2.DESCRIPTOR.GetOptions().Extensions[core_pb2.protocol_version]
time_bound = 0.5
priority = 10
# allowed_value_types = [value_pb2.ValueType.Value(value) for value in ALLOWED_VALUE_TYPES]
problem_description = utils.encode_problem_description(
problem_module.Problem.load(test_paths['TRAIN']['problem'])
)
template = None
if specified_template == 'FULL':
with d3m_utils.silence():
pipeline = pipeline_utils.load_pipeline(FULL_SPECIFIED_PIPELINE_PATH)
template = utils.encode_pipeline_description(pipeline, ALLOWED_VALUE_TYPES, constants.Path.TEMP_STORAGE_ROOT)
elif specified_template == 'PRE': # PRE for PREPROCESSING
pipeline = runtime_module.get_pipeline(PRE_SPECIFIED_PIPELINE_PATH, load_all_primitives=False)
template = utils.encode_pipeline_description(pipeline, ALLOWED_VALUE_TYPES, constants.Path.TEMP_STORAGE_ROOT)
inputs = [
value_pb2.Value(
dataset_uri=test_paths['TRAIN']['dataset']
)
]
request = core_pb2.SearchSolutionsRequest(
user_agent=user_agent,
version=version,
time_bound_search=time_bound,
priority=priority,
allowed_value_types=ALLOWED_VALUE_TYPES,
problem=problem_description,
template=template,
inputs=inputs
)
return request
def get_search_solution_results_request(search_id):
request = core_pb2.GetSearchSolutionsResultsRequest(search_id=search_id)
return request
def fit_solution_request(solution_id, test_paths):
inputs = [
value_pb2.Value(
dataset_uri=test_paths['TRAIN']['dataset']
)
]
expose_outputs = ['outputs.0']
expose_value_types = ['CSV_URI']
users = [
core_pb2.SolutionRunUser(
id='test_user',
chosen=True,
reason='just because'
)
]
request = core_pb2.FitSolutionRequest(
solution_id=solution_id,
inputs=inputs,
expose_outputs=expose_outputs,
expose_value_types=expose_value_types,
users=users
)
return request
def get_fit_solution_results_request(request_id):
request = core_pb2.GetFitSolutionResultsRequest(
request_id=request_id
)
return request
def produce_solution_request(fitted_solution_id, test_paths):
inputs = [
value_pb2.Value(
dataset_uri=test_paths['TEST']['dataset']
)
]
expose_outputs = ['outputs.0']
expose_value_types = ['CSV_URI']
users = [
core_pb2.SolutionRunUser(
id='test_user',
chosen=True,
reason='just because'
)
]
request = core_pb2.ProduceSolutionRequest(
fitted_solution_id=fitted_solution_id,
inputs=inputs,
expose_outputs=expose_outputs,
expose_value_types=expose_value_types,
users=users
)
return request
def get_produce_solution_results_request(request_id):
request = core_pb2.GetProduceSolutionResultsRequest(
request_id=request_id
)
return request
def describe_solution_request(solution_id):
request = core_pb2.DescribeSolutionRequest(
solution_id=solution_id
)
return request
def score_solution_request(solution_id, test_paths):
inputs = [
value_pb2.Value(
dataset_uri=test_paths['SCORE']['dataset']
)
]
problem = problem_module.Problem.load(test_paths['SCORE']['problem'])
performance_metrics = []
for performance_metric in problem['problem'].get('performance_metrics', []):
performance_metrics.append(utils.encode_performance_metric(performance_metric))
# TODO add support for more evaluation methods
users = []
evaluation_method = 'K_FOLD'
configuration = core_pb2.ScoringConfiguration(
method=evaluation_method,
folds=2,
# train_test_ratio
shuffle=True,
random_seed=42,
stratified=True,
)
request = core_pb2.ScoreSolutionRequest(
solution_id=solution_id,
inputs=inputs,
performance_metrics=performance_metrics,
users=users,
configuration=configuration
)
return request
def get_score_solution_request(solution_id):
request = core_pb2.ScoreSolutionRequest(
solution_id=solution_id
)
return request
def solution_export_request(solution_id):
rank = 0.1
request = core_pb2.SolutionExportRequest(
solution_id=solution_id,
rank=rank
)
return request
def end_search_solutions_request(search_id):
request = core_pb2.EndSearchSolutionsRequest(search_id=search_id)
return request
def stop_search_solution_request(search_id):
request = core_pb2.StopSearchSolutionsRequest(search_id=search_id)
return request
def run(test_paths, specified_template=None):
channel = grpc.insecure_channel('localhost:45042')
stub = core_pb2_grpc.CoreStub(channel)
print_name('Hello')
hello_r = stub.Hello(hello_request())
pprint(hello_r)
print_name('ListPrimitive')
list_primitives_r = stub.ListPrimitives(list_primitives_request())
for _primitive in list_primitives_r.primitives:
print_space()
pprint(_primitive)
print_name('SearchSolution')
search_solutions_r = stub.SearchSolutions(search_solutions_request(test_paths, specified_template))
search_id = search_solutions_r.search_id
pprint(search_solutions_r)
print_name('GetSearchSolutionsResults')
solution_id = None
for get_search_solution_r in stub.GetSearchSolutionsResults(get_search_solution_results_request(search_id)):
print_space()
pprint(get_search_solution_r)
if get_search_solution_r.solution_id:
solution_id = get_search_solution_r.solution_id
print_name('DescribeSolution')
describe_solution_r = stub.DescribeSolution(describe_solution_request(solution_id))
pprint(describe_solution_r)
print_name('FitSolution')
fit_solution_r = stub.FitSolution(fit_solution_request(solution_id, test_paths))
fit_request_id = fit_solution_r.request_id
pprint(fit_solution_r)
print_name('GetFitSolutionResultsRequest')
fitted_solution_id = None
for get_git_solution_results_r in stub.GetFitSolutionResults(get_fit_solution_results_request(fit_request_id)):
print_space()
pprint(get_git_solution_results_r)
fitted_solution_id = get_git_solution_results_r.fitted_solution_id
print_name('ProduceSolutionRequest')
produce_solution_r = stub.ProduceSolution(produce_solution_request(fitted_solution_id, test_paths))
produce_request_id = produce_solution_r.request_id
pprint(produce_solution_r)
print_name('GetProduceSolutionResultsRequest')
for get_produce_solution_results_r in stub.GetProduceSolutionResults(
get_produce_solution_results_request(produce_request_id)):
print_space()
pprint(get_produce_solution_results_r)
print_name('ScoreSolution')
score_solution_r = stub.ScoreSolution(score_solution_request(solution_id, test_paths))
score_request_id = score_solution_r.request_id
pprint(score_solution_r)
print_name('GetScoreSolutionResults')
for score_solution_r in stub.GetScoreSolutionResults(get_score_solution_request(score_request_id)):
print_space()
pprint(score_solution_r)
print_name('SolutionExport')
solution_export_r = stub.SolutionExport(solution_export_request(solution_id))
pprint(solution_export_r)
print_name('StopSearchSolutions')
stop_search_solution_r = stub.StopSearchSolutions(stop_search_solution_request(search_id))
pprint(stop_search_solution_r)
print_name('EndSearchSolutions')
end_search_solutions_r = stub.EndSearchSolutions(end_search_solutions_request(search_id))
pprint(end_search_solutions_r)
def print_name(name):
length = LENGTH
free_space = length - len(name) - 2
space = int(free_space / 2)
name = '#' + ' ' * space + name + ' ' * space
if free_space % 2 == 0:
name = name + '#'
else:
name = name + ' #'
print("#" * length)
print(name)
print("#" * length)
def print_space():
print('-' * LENGTH)
def configure_parser(parser, *, skip_arguments=()):
parser.add_argument(
'-t', '--test-path', type=str, default="/D3M/internal_d3m/Winter_2018_tamuta2/datasets/26/",
help="path of d3m dataset to test."
)
def get_problem_id(test_path):
problem_description = problem_module.Problem.load(test_path)
print(problem_description)
problem_id = problem_description.get('id', None)
return problem_id
def get_paths(test_path):
# Classification SCORE dataset paths are (problem_SCORE, dataset_SCORE).
# However, regression and other SCORE dataset paths are (problem_TEST, dataset_TEST).
score_problem_relative_path = os.path.join(test_path, 'SCORE/problem_SCORE/problemDoc.json')
score_dataset_relative_path = os.path.join(test_path, 'SCORE/dataset_SCORE/datasetDoc.json')
if not os.path.exists(score_problem_relative_path) or not os.path.exists(score_dataset_relative_path):
score_problem_relative_path = os.path.join(test_path, 'SCORE/problem_TEST/problemDoc.json')
score_dataset_relative_path = os.path.join(test_path, 'SCORE/dataset_TEST/datasetDoc.json')
test_paths = {
'TRAIN': {
'dataset': os.path.join(test_path, 'TRAIN/dataset_TRAIN/datasetDoc.json'),
'problem': pathlib.Path(
os.path.abspath(os.path.join(test_path, 'TRAIN/problem_TRAIN/problemDoc.json'))).as_uri()
},
'TEST': {
'dataset': os.path.join(test_path, 'TEST/dataset_TEST/datasetDoc.json'),
'problem': pathlib.Path(
os.path.abspath(os.path.join(test_path, 'TEST/problem_TEST/problemDoc.json'))).as_uri()
},
'SCORE': {
'dataset': os.path.join(test_path, score_dataset_relative_path),
'problem': pathlib.Path(os.path.abspath(score_problem_relative_path)).as_uri()
},
}
return test_paths
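# Illustrative note (added commentary, not part of the original script): get_paths() expects the
# usual D3M dataset layout with TRAIN/TEST/SCORE subdirectories, so a typical invocation of this
# script would look roughly like:
#
#   python _server_test.py -t /path/to/datasets/185_baseball/
#
# with a TA2 server listening on localhost:45042 (see run() above). The dataset path shown here
# is hypothetical.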
if __name__ == '__main__':
# Creating parser
parser = argparse.ArgumentParser(description="Test from command line")
configure_parser(parser)
arguments = parser.parse_args()
# Getting test root path
test_path = arguments.test_path
# Getting test paths train/test/score
test_paths = get_paths(test_path)
# Getting problem id
test_id = get_problem_id(test_paths['TEST']['problem'])
print_name('Starting Test: ' + test_id)
run(test_paths, None)
print_name('Finishing Test: ' + test_id)
| 31.302083
| 117
| 0.717138
|
c35a1659aff0b7a706eb906ded01bb6281a11109
| 116
|
py
|
Python
|
__init__.py
|
anglebinbin/Barista-tool
|
2d51507fb3566881923f0b273127f59d23ed317f
|
[
"MIT"
] | 1
|
2020-02-11T19:05:17.000Z
|
2020-02-11T19:05:17.000Z
|
__init__.py
|
anglebinbin/Barista-tool
|
2d51507fb3566881923f0b273127f59d23ed317f
|
[
"MIT"
] | null | null | null |
__init__.py
|
anglebinbin/Barista-tool
|
2d51507fb3566881923f0b273127f59d23ed317f
|
[
"MIT"
] | null | null | null |
# required for providing correct package import, see https://stackoverflow.com/questions/448271/what-is-init-py-for
| 58
| 115
| 0.810345
|
fad79b5ded46394dfd6cd98da41e75a080dcd875
| 17,067
|
py
|
Python
|
packages/python/plotly/plotly/tests/test_optional/test_px/test_px_functions.py
|
adehad/plotly.py
|
bca292530c400c61e8b7f8a6571262a9dde43ee3
|
[
"MIT"
] | 1
|
2021-10-08T18:37:36.000Z
|
2021-10-08T18:37:36.000Z
|
packages/python/plotly/plotly/tests/test_optional/test_px/test_px_functions.py
|
adehad/plotly.py
|
bca292530c400c61e8b7f8a6571262a9dde43ee3
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/tests/test_optional/test_px/test_px_functions.py
|
adehad/plotly.py
|
bca292530c400c61e8b7f8a6571262a9dde43ee3
|
[
"MIT"
] | null | null | null |
import plotly.express as px
import plotly.graph_objects as go
from numpy.testing import assert_array_equal
import numpy as np
import pandas as pd
import pytest
def _compare_figures(go_trace, px_fig):
"""Compare a figure created with a go trace and a figure created with
a px function call. Check that all values inside the go Figure are the
same in the px figure (which sets more parameters).
"""
go_fig = go.Figure(go_trace)
go_fig = go_fig.to_plotly_json()
px_fig = px_fig.to_plotly_json()
del go_fig["layout"]["template"]
del px_fig["layout"]["template"]
for key in go_fig["data"][0]:
assert_array_equal(go_fig["data"][0][key], px_fig["data"][0][key])
for key in go_fig["layout"]:
assert go_fig["layout"][key] == px_fig["layout"][key]
def test_pie_like_px():
# Pie
labels = ["Oxygen", "Hydrogen", "Carbon_Dioxide", "Nitrogen"]
values = [4500, 2500, 1053, 500]
fig = px.pie(names=labels, values=values)
trace = go.Pie(labels=labels, values=values)
_compare_figures(trace, fig)
labels = ["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"]
parents = ["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve"]
values = [10, 14, 12, 10, 2, 6, 6, 4, 4]
# Sunburst
fig = px.sunburst(names=labels, parents=parents, values=values)
trace = go.Sunburst(labels=labels, parents=parents, values=values)
_compare_figures(trace, fig)
# Treemap
fig = px.treemap(names=labels, parents=parents, values=values)
trace = go.Treemap(labels=labels, parents=parents, values=values)
_compare_figures(trace, fig)
# Funnel
x = ["A", "B", "C"]
y = [3, 2, 1]
fig = px.funnel(y=y, x=x)
trace = go.Funnel(y=y, x=x)
_compare_figures(trace, fig)
# Funnelarea
fig = px.funnel_area(values=y, names=x)
trace = go.Funnelarea(values=y, labels=x)
_compare_figures(trace, fig)
def test_sunburst_treemap_colorscales():
labels = ["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"]
parents = ["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve"]
values = [10, 14, 12, 10, 2, 6, 6, 4, 4]
for func, colorway in zip(
[px.sunburst, px.treemap], ["sunburstcolorway", "treemapcolorway"]
):
# Continuous colorscale
fig = func(
names=labels,
parents=parents,
values=values,
color=values,
color_continuous_scale="Viridis",
range_color=(5, 15),
)
assert (fig.layout.coloraxis.cmin, fig.layout.coloraxis.cmax) == (5, 15)
# Discrete colorscale, color arg passed
color_seq = px.colors.sequential.Reds
fig = func(
names=labels,
parents=parents,
values=values,
color=labels,
color_discrete_sequence=color_seq,
)
assert np.all([col in color_seq for col in fig.data[0].marker.colors])
# Numerical color arg passed, fall back to continuous
fig = func(names=labels, parents=parents, values=values, color=values,)
assert [
el[0] == px.colors.sequential.Viridis
for i, el in enumerate(fig.layout.coloraxis.colorscale)
]
# Numerical color arg passed, continuous colorscale
# even if color_discrete_sequence if passed
fig = func(
names=labels,
parents=parents,
values=values,
color=values,
color_discrete_sequence=color_seq,
)
assert [
el[0] == px.colors.sequential.Viridis
for i, el in enumerate(fig.layout.coloraxis.colorscale)
]
# Discrete colorscale, no color arg passed
color_seq = px.colors.sequential.Reds
fig = func(
names=labels,
parents=parents,
values=values,
color_discrete_sequence=color_seq,
)
assert list(fig.layout[colorway]) == color_seq
def test_sunburst_treemap_with_path():
vendors = ["A", "B", "C", "D", "E", "F", "G", "H"]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
"Tech",
"Tech",
"Finance",
"Finance",
]
regions = ["North", "North", "North", "North", "South", "South", "South", "South"]
values = [1, 3, 2, 4, 2, 2, 1, 4]
total = ["total"] * 8
df = pd.DataFrame(
dict(
vendors=vendors,
sectors=sectors,
regions=regions,
values=values,
total=total,
)
)
path = ["total", "regions", "sectors", "vendors"]
# No values
fig = px.sunburst(df, path=path)
assert fig.data[0].branchvalues == "total"
# Values passed
fig = px.sunburst(df, path=path, values="values")
assert fig.data[0].branchvalues == "total"
assert fig.data[0].values[-1] == np.sum(values)
# Values passed
fig = px.sunburst(df, path=path, values="values")
assert fig.data[0].branchvalues == "total"
assert fig.data[0].values[-1] == np.sum(values)
# Error when values cannot be converted to numerical data type
df["values"] = ["1 000", "3 000", "2", "4", "2", "2", "1 000", "4 000"]
msg = "Column `values` of `df` could not be converted to a numerical data type."
with pytest.raises(ValueError, match=msg):
fig = px.sunburst(df, path=path, values="values")
# path is a mixture of column names and array-like
path = [df.total, "regions", df.sectors, "vendors"]
fig = px.sunburst(df, path=path)
assert fig.data[0].branchvalues == "total"
# Continuous colorscale
df["values"] = 1
fig = px.sunburst(df, path=path, values="values", color="values")
assert "coloraxis" in fig.data[0].marker
assert np.all(np.array(fig.data[0].marker.colors) == 1)
assert fig.data[0].values[-1] == 8
def test_sunburst_treemap_with_path_and_hover():
df = px.data.tips()
fig = px.sunburst(
df, path=["sex", "day", "time", "smoker"], color="smoker", hover_data=["smoker"]
)
assert "smoker" in fig.data[0].hovertemplate
df = px.data.gapminder().query("year == 2007")
fig = px.sunburst(
df, path=["continent", "country"], color="lifeExp", hover_data=df.columns
)
assert fig.layout.coloraxis.colorbar.title.text == "lifeExp"
df = px.data.tips()
fig = px.sunburst(df, path=["sex", "day", "time", "smoker"], hover_name="smoker")
assert "smoker" not in fig.data[0].hovertemplate # represented as '%{hovertext}'
assert "%{hovertext}" in fig.data[0].hovertemplate # represented as '%{hovertext}'
df = px.data.tips()
fig = px.sunburst(df, path=["sex", "day", "time", "smoker"], custom_data=["smoker"])
assert fig.data[0].customdata[0][0] in ["Yes", "No"]
assert "smoker" not in fig.data[0].hovertemplate
assert "%{hovertext}" not in fig.data[0].hovertemplate
def test_sunburst_treemap_with_path_color():
vendors = ["A", "B", "C", "D", "E", "F", "G", "H"]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
"Tech",
"Tech",
"Finance",
"Finance",
]
regions = ["North", "North", "North", "North", "South", "South", "South", "South"]
values = [1, 3, 2, 4, 2, 2, 1, 4]
calls = [8, 2, 1, 3, 2, 2, 4, 1]
total = ["total"] * 8
df = pd.DataFrame(
dict(
vendors=vendors,
sectors=sectors,
regions=regions,
values=values,
total=total,
calls=calls,
)
)
path = ["total", "regions", "sectors", "vendors"]
fig = px.sunburst(df, path=path, values="values", color="calls")
colors = fig.data[0].marker.colors
assert np.all(np.array(colors[:8]) == np.array(calls))
fig = px.sunburst(df, path=path, color="calls")
colors = fig.data[0].marker.colors
assert np.all(np.array(colors[:8]) == np.array(calls))
# Hover info
df["hover"] = [el.lower() for el in vendors]
fig = px.sunburst(df, path=path, color="calls", hover_data=["hover"])
custom = fig.data[0].customdata
assert [el[0] for el in custom[:8]] == df["hover"].tolist()
assert [el[0] for el in custom[8:]] == ["(?)"] * 7
assert [el[1] for el in custom[:8]] == df["calls"].tolist()
# Discrete color
fig = px.sunburst(df, path=path, color="vendors")
assert len(np.unique(fig.data[0].marker.colors)) == 9
# Discrete color and color_discrete_map
cmap = {"Tech": "yellow", "Finance": "magenta", "(?)": "black"}
fig = px.sunburst(df, path=path, color="sectors", color_discrete_map=cmap)
assert np.all(np.in1d(fig.data[0].marker.colors, list(cmap.values())))
# Numerical column in path
df["regions"] = df["regions"].map({"North": 1, "South": 2})
path = ["total", "regions", "sectors", "vendors"]
fig = px.sunburst(df, path=path, values="values", color="calls")
colors = fig.data[0].marker.colors
assert np.all(np.array(colors[:8]) == np.array(calls))
def test_sunburst_treemap_column_parent():
vendors = ["A", "B", "C", "D", "E", "F", "G", "H"]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
"Tech",
"Tech",
"Finance",
"Finance",
]
regions = ["North", "North", "North", "North", "South", "South", "South", "South"]
values = [1, 3, 2, 4, 2, 2, 1, 4]
df = pd.DataFrame(dict(id=vendors, sectors=sectors, parent=regions, values=values,))
path = ["parent", "sectors", "id"]
# One column of the path is a reserved name - this is ok and should not raise
px.sunburst(df, path=path, values="values")
def test_sunburst_treemap_with_path_non_rectangular():
vendors = ["A", "B", "C", "D", None, "E", "F", "G", "H", None]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
None,
"Tech",
"Tech",
"Finance",
"Finance",
"Finance",
]
regions = [
"North",
"North",
"North",
"North",
"North",
"South",
"South",
"South",
"South",
"South",
]
values = [1, 3, 2, 4, 1, 2, 2, 1, 4, 1]
total = ["total"] * 10
df = pd.DataFrame(
dict(
vendors=vendors,
sectors=sectors,
regions=regions,
values=values,
total=total,
)
)
path = ["total", "regions", "sectors", "vendors"]
msg = "Non-leaves rows are not permitted in the dataframe"
with pytest.raises(ValueError, match=msg):
fig = px.sunburst(df, path=path, values="values")
df.loc[df["vendors"].isnull(), "sectors"] = "Other"
fig = px.sunburst(df, path=path, values="values")
assert fig.data[0].values[-1] == np.sum(values)
def test_pie_funnelarea_colorscale():
labels = ["A", "B", "C", "D"]
values = [3, 2, 1, 4]
for func, colorway in zip(
[px.sunburst, px.treemap], ["sunburstcolorway", "treemapcolorway"]
):
# Discrete colorscale, no color arg passed
color_seq = px.colors.sequential.Reds
fig = func(names=labels, values=values, color_discrete_sequence=color_seq,)
assert list(fig.layout[colorway]) == color_seq
# Discrete colorscale, color arg passed
color_seq = px.colors.sequential.Reds
fig = func(
names=labels,
values=values,
color=labels,
color_discrete_sequence=color_seq,
)
assert np.all([col in color_seq for col in fig.data[0].marker.colors])
def test_funnel():
fig = px.funnel(
x=[5, 4, 3, 3, 2, 1],
y=["A", "B", "C", "A", "B", "C"],
color=["0", "0", "0", "1", "1", "1"],
)
assert len(fig.data) == 2
def test_parcats_dimensions_max():
df = px.data.tips()
# default behaviour
fig = px.parallel_categories(df)
assert [d.label for d in fig.data[0].dimensions] == [
"sex",
"smoker",
"day",
"time",
"size",
]
# explicit subset of default
fig = px.parallel_categories(df, dimensions=["sex", "smoker", "day"])
assert [d.label for d in fig.data[0].dimensions] == ["sex", "smoker", "day"]
# shrinking max
fig = px.parallel_categories(df, dimensions_max_cardinality=4)
assert [d.label for d in fig.data[0].dimensions] == [
"sex",
"smoker",
"day",
"time",
]
# explicit superset of default, violating the max
fig = px.parallel_categories(
df, dimensions=["sex", "smoker", "day", "size"], dimensions_max_cardinality=4
)
assert [d.label for d in fig.data[0].dimensions] == ["sex", "smoker", "day", "size"]
@pytest.mark.parametrize("histfunc,y", [(None, None), ("count", "tip")])
def test_histfunc_hoverlabels_univariate(histfunc, y):
def check_label(label, fig):
assert fig.layout.yaxis.title.text == label
assert label + "=" in fig.data[0].hovertemplate
df = px.data.tips()
# base case, just "count" (note count(tip) is same as count())
fig = px.histogram(df, x="total_bill", y=y, histfunc=histfunc)
check_label("count", fig)
# without y, label is just histnorm
for histnorm in ["probability", "percent", "density", "probability density"]:
fig = px.histogram(
df, x="total_bill", y=y, histfunc=histfunc, histnorm=histnorm
)
check_label(histnorm, fig)
for histnorm in ["probability", "percent", "density", "probability density"]:
for barnorm in ["percent", "fraction"]:
fig = px.histogram(
df,
x="total_bill",
y=y,
histfunc=histfunc,
histnorm=histnorm,
barnorm=barnorm,
)
check_label("%s (normalized as %s)" % (histnorm, barnorm), fig)
def test_histfunc_hoverlabels_bivariate():
def check_label(label, fig):
assert fig.layout.yaxis.title.text == label
assert label + "=" in fig.data[0].hovertemplate
df = px.data.tips()
# with y, should be same as forcing histfunc to sum
fig = px.histogram(df, x="total_bill", y="tip")
check_label("sum of tip", fig)
# change probability to fraction when histfunc is sum
fig = px.histogram(df, x="total_bill", y="tip", histnorm="probability")
check_label("fraction of sum of tip", fig)
# percent is percent
fig = px.histogram(df, x="total_bill", y="tip", histnorm="percent")
check_label("percent of sum of tip", fig)
# the other two are "weighted by"
for histnorm in ["density", "probability density"]:
fig = px.histogram(df, x="total_bill", y="tip", histnorm=histnorm)
check_label("%s weighted by tip" % histnorm, fig)
# check a few "normalized by"
for histnorm in ["density", "probability density"]:
for barnorm in ["fraction", "percent"]:
fig = px.histogram(
df, x="total_bill", y="tip", histnorm=histnorm, barnorm=barnorm
)
check_label(
"%s weighted by tip (normalized as %s)" % (histnorm, barnorm), fig
)
# these next two are weird but OK...
fig = px.histogram(
df,
x="total_bill",
y="tip",
histfunc="min",
histnorm="probability",
barnorm="percent",
)
check_label("fraction of sum of min of tip (normalized as percent)", fig)
fig = px.histogram(
df,
x="total_bill",
y="tip",
histfunc="avg",
histnorm="percent",
barnorm="fraction",
)
check_label("percent of sum of avg of tip (normalized as fraction)", fig)
# this next one is basically "never do this" but needs a defined behaviour
fig = px.histogram(df, x="total_bill", y="tip", histfunc="max", histnorm="density")
check_label("density of max of tip", fig)
def test_timeline():
df = pd.DataFrame(
[
dict(Task="Job A", Start="2009-01-01", Finish="2009-02-28"),
dict(Task="Job B", Start="2009-03-05", Finish="2009-04-15"),
dict(Task="Job C", Start="2009-02-20", Finish="2009-05-30"),
]
)
fig = px.timeline(df, x_start="Start", x_end="Finish", y="Task", color="Task")
assert len(fig.data) == 3
assert fig.layout.xaxis.type == "date"
assert fig.layout.xaxis.title.text is None
fig = px.timeline(df, x_start="Start", x_end="Finish", y="Task", facet_row="Task")
assert len(fig.data) == 3
assert fig.data[1].xaxis == "x2"
assert fig.layout.xaxis.type == "date"
msg = "Both x_start and x_end are required"
with pytest.raises(ValueError, match=msg):
px.timeline(df, x_start="Start", y="Task", color="Task")
msg = "Both x_start and x_end must refer to data convertible to datetimes."
with pytest.raises(TypeError, match=msg):
px.timeline(df, x_start="Start", x_end=["a", "b", "c"], y="Task", color="Task")
| 34.271084
| 88
| 0.576786
|
33af7bca94df4bd7c6d4b1304c2512f85d9ab759
| 3,317
|
py
|
Python
|
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/_configuration.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-01-24T08:54:57.000Z
|
2022-01-24T08:54:57.000Z
|
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/_configuration.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/_configuration.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class FrontDoorManagementClientConfiguration(Configuration):
"""Configuration for FrontDoorManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(FrontDoorManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-frontdoor/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
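# Illustrative usage sketch (added commentary, not part of the generated module): this
# configuration is normally constructed for you by FrontDoorManagementClient, but it can be
# built directly, e.g.
#
#   from azure.identity import DefaultAzureCredential
#   config = FrontDoorManagementClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="<subscription-id>",
#   )
#
# assuming the azure-identity package is installed; the subscription id placeholder is hypothetical.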
| 48.779412
| 180
| 0.700935
|
51d0bd367f395a42963328f02eb81555b11d6930
| 72,935
|
py
|
Python
|
venv/lib/python2.7/site-packages/pyramid/tests/test_viewderivers.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
|
78b057b9158e7e818de833f1fd91c7f5d598ac8a
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/pyramid/tests/test_viewderivers.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
|
78b057b9158e7e818de833f1fd91c7f5d598ac8a
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/pyramid/tests/test_viewderivers.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
|
78b057b9158e7e818de833f1fd91c7f5d598ac8a
|
[
"MIT"
] | null | null | null |
import unittest
from zope.interface import implementer
from pyramid import testing
from pyramid.exceptions import ConfigurationError
from pyramid.interfaces import (
IResponse,
IRequest,
)
class TestDeriveView(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
self.config = None
testing.tearDown()
def _makeRequest(self):
request = DummyRequest()
request.registry = self.config.registry
return request
def _registerLogger(self):
from pyramid.interfaces import IDebugLogger
logger = DummyLogger()
self.config.registry.registerUtility(logger, IDebugLogger)
return logger
def _registerSecurityPolicy(self, permissive):
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.interfaces import IAuthorizationPolicy
policy = DummySecurityPolicy(permissive)
self.config.registry.registerUtility(policy, IAuthenticationPolicy)
self.config.registry.registerUtility(policy, IAuthorizationPolicy)
def test_function_returns_non_adaptable(self):
def view(request):
return None
result = self.config.derive_view(view)
self.assertFalse(result is view)
try:
result(None, None)
except ValueError as e:
self.assertEqual(
e.args[0],
'Could not convert return value of the view callable function '
'pyramid.tests.test_viewderivers.view into a response '
'object. The value returned was None. You may have forgotten '
'to return a value from the view callable.'
)
else: # pragma: no cover
raise AssertionError
def test_function_returns_non_adaptable_dict(self):
def view(request):
return {'a':1}
result = self.config.derive_view(view)
self.assertFalse(result is view)
try:
result(None, None)
except ValueError as e:
self.assertEqual(
e.args[0],
"Could not convert return value of the view callable function "
"pyramid.tests.test_viewderivers.view into a response "
"object. The value returned was {'a': 1}. You may have "
"forgotten to define a renderer in the view configuration."
)
else: # pragma: no cover
raise AssertionError
def test_instance_returns_non_adaptable(self):
class AView(object):
def __call__(self, request):
return None
view = AView()
result = self.config.derive_view(view)
self.assertFalse(result is view)
try:
result(None, None)
except ValueError as e:
msg = e.args[0]
self.assertTrue(msg.startswith(
'Could not convert return value of the view callable object '
'<pyramid.tests.test_viewderivers.'))
self.assertTrue(msg.endswith(
'> into a response object. The value returned was None. You '
'may have forgotten to return a value from the view callable.'))
else: # pragma: no cover
raise AssertionError
def test_function_returns_true_Response_no_renderer(self):
from pyramid.response import Response
r = Response('Hello')
def view(request):
return r
result = self.config.derive_view(view)
self.assertFalse(result is view)
response = result(None, None)
self.assertEqual(response, r)
def test_function_returns_true_Response_with_renderer(self):
from pyramid.response import Response
r = Response('Hello')
def view(request):
return r
renderer = object()
result = self.config.derive_view(view)
self.assertFalse(result is view)
response = result(None, None)
self.assertEqual(response, r)
def test_requestonly_default_method_returns_non_adaptable(self):
request = DummyRequest()
class AView(object):
def __init__(self, request):
pass
def __call__(self):
return None
result = self.config.derive_view(AView)
self.assertFalse(result is AView)
try:
result(None, request)
except ValueError as e:
self.assertEqual(
e.args[0],
'Could not convert return value of the view callable '
'method __call__ of '
'class pyramid.tests.test_viewderivers.AView into a '
'response object. The value returned was None. You may have '
'forgotten to return a value from the view callable.'
)
else: # pragma: no cover
raise AssertionError
def test_requestonly_nondefault_method_returns_non_adaptable(self):
request = DummyRequest()
class AView(object):
def __init__(self, request):
pass
def theviewmethod(self):
return None
result = self.config.derive_view(AView, attr='theviewmethod')
self.assertFalse(result is AView)
try:
result(None, request)
except ValueError as e:
self.assertEqual(
e.args[0],
'Could not convert return value of the view callable '
'method theviewmethod of '
'class pyramid.tests.test_viewderivers.AView into a '
'response object. The value returned was None. You may have '
'forgotten to return a value from the view callable.'
)
else: # pragma: no cover
raise AssertionError
def test_requestonly_function(self):
response = DummyResponse()
def view(request):
return response
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(result(None, None), response)
def test_requestonly_function_with_renderer(self):
response = DummyResponse()
class moo(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, 'OK')
self.assertEqual(view_inst, view)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
def view(request):
return 'OK'
result = self.config.derive_view(view, renderer=moo())
self.assertFalse(result.__wraps__ is view)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_requestonly_function_with_renderer_request_override(self):
def moo(info):
def inner(value, system):
self.assertEqual(value, 'OK')
self.assertEqual(system['request'], request)
self.assertEqual(system['context'], context)
return b'moo'
return inner
def view(request):
return 'OK'
self.config.add_renderer('moo', moo)
result = self.config.derive_view(view, renderer='string')
self.assertFalse(result is view)
request = self._makeRequest()
request.override_renderer = 'moo'
context = testing.DummyResource()
self.assertEqual(result(context, request).body, b'moo')
def test_requestonly_function_with_renderer_request_has_view(self):
response = DummyResponse()
class moo(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, 'OK')
self.assertEqual(view_inst, 'view')
self.assertEqual(ctx, context)
return response
def clone(self):
return self
def view(request):
return 'OK'
result = self.config.derive_view(view, renderer=moo())
self.assertFalse(result.__wraps__ is view)
request = self._makeRequest()
request.__view__ = 'view'
context = testing.DummyResource()
r = result(context, request)
self.assertEqual(r, response)
self.assertFalse(hasattr(request, '__view__'))
def test_class_without_attr(self):
response = DummyResponse()
class View(object):
def __init__(self, request):
pass
def __call__(self):
return response
result = self.config.derive_view(View)
request = self._makeRequest()
self.assertEqual(result(None, request), response)
self.assertEqual(request.__view__.__class__, View)
def test_class_with_attr(self):
response = DummyResponse()
class View(object):
def __init__(self, request):
pass
def another(self):
return response
result = self.config.derive_view(View, attr='another')
request = self._makeRequest()
self.assertEqual(result(None, request), response)
self.assertEqual(request.__view__.__class__, View)
def test_as_function_context_and_request(self):
def view(context, request):
return 'OK'
result = self.config.derive_view(view)
self.assertTrue(result.__wraps__ is view)
self.assertFalse(hasattr(result, '__call_permissive__'))
self.assertEqual(view(None, None), 'OK')
def test_as_function_requestonly(self):
response = DummyResponse()
def view(request):
return response
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
self.assertEqual(result(None, None), response)
def test_as_newstyle_class_context_and_request(self):
response = DummyResponse()
class view(object):
def __init__(self, context, request):
pass
def __call__(self):
return response
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
self.assertEqual(result(None, request), response)
self.assertEqual(request.__view__.__class__, view)
def test_as_newstyle_class_requestonly(self):
response = DummyResponse()
class view(object):
def __init__(self, context, request):
pass
def __call__(self):
return response
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
self.assertEqual(result(None, request), response)
self.assertEqual(request.__view__.__class__, view)
def test_as_oldstyle_class_context_and_request(self):
response = DummyResponse()
class view:
def __init__(self, context, request):
pass
def __call__(self):
return response
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
self.assertEqual(result(None, request), response)
self.assertEqual(request.__view__.__class__, view)
def test_as_oldstyle_class_requestonly(self):
response = DummyResponse()
class view:
def __init__(self, context, request):
pass
def __call__(self):
return response
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
self.assertEqual(result(None, request), response)
self.assertEqual(request.__view__.__class__, view)
def test_as_instance_context_and_request(self):
response = DummyResponse()
class View:
def __call__(self, context, request):
return response
view = View()
result = self.config.derive_view(view)
self.assertTrue(result.__wraps__ is view)
self.assertFalse(hasattr(result, '__call_permissive__'))
self.assertEqual(result(None, None), response)
def test_as_instance_requestonly(self):
response = DummyResponse()
class View:
def __call__(self, request):
return response
view = View()
result = self.config.derive_view(view)
self.assertFalse(result is view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertTrue('test_viewderivers' in result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
self.assertEqual(result(None, None), response)
def test_with_debug_authorization_no_authpol(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
logger = self._registerLogger()
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): Allowed "
"(no authorization policy in use)")
def test_with_debug_authorization_authn_policy_no_authz_policy(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(debug_authorization=True)
from pyramid.interfaces import IAuthenticationPolicy
policy = DummySecurityPolicy(False)
self.config.registry.registerUtility(policy, IAuthenticationPolicy)
logger = self._registerLogger()
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): Allowed "
"(no authorization policy in use)")
def test_with_debug_authorization_authz_policy_no_authn_policy(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(debug_authorization=True)
from pyramid.interfaces import IAuthorizationPolicy
policy = DummySecurityPolicy(False)
self.config.registry.registerUtility(policy, IAuthorizationPolicy)
logger = self._registerLogger()
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): Allowed "
"(no authorization policy in use)")
def test_with_debug_authorization_no_permission(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
self._registerSecurityPolicy(True)
logger = self._registerLogger()
result = self.config._derive_view(view)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): Allowed ("
"no permission registered)")
def test_debug_auth_permission_authpol_permitted(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
logger = self._registerLogger()
self._registerSecurityPolicy(True)
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertEqual(result.__call_permissive__.__wraps__, view)
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): True")
def test_debug_auth_permission_authpol_permitted_no_request(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
logger = self._registerLogger()
self._registerSecurityPolicy(True)
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertEqual(result.__call_permissive__.__wraps__, view)
self.assertEqual(result(None, None), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url None (view name "
"None against context None): True")
def test_debug_auth_permission_authpol_denied(self):
from pyramid.httpexceptions import HTTPForbidden
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
logger = self._registerLogger()
self._registerSecurityPolicy(False)
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertEqual(result.__call_permissive__.__wraps__, view)
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertRaises(HTTPForbidden, result, None, request)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): False")
def test_debug_auth_permission_authpol_denied2(self):
view = lambda *arg: 'OK'
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
self._registerLogger()
self._registerSecurityPolicy(False)
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
permitted = result.__permitted__(None, None)
self.assertEqual(permitted, False)
def test_debug_auth_permission_authpol_overridden(self):
from pyramid.security import NO_PERMISSION_REQUIRED
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
logger = self._registerLogger()
self._registerSecurityPolicy(False)
result = self.config._derive_view(view, permission=NO_PERMISSION_REQUIRED)
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context None): "
"Allowed (NO_PERMISSION_REQUIRED)")
def test_debug_auth_permission_authpol_permitted_excview(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = dict(
debug_authorization=True, reload_templates=True)
logger = self._registerLogger()
self._registerSecurityPolicy(True)
result = self.config._derive_view(
view, context=Exception, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertEqual(result.__call_permissive__.__wraps__, view)
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(Exception(), request), response)
self.assertEqual(len(logger.messages), 1)
self.assertEqual(logger.messages[0],
"debug_authorization of url url (view name "
"'view_name' against context Exception()): True")
def test_secured_view_authn_policy_no_authz_policy(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = {}
from pyramid.interfaces import IAuthenticationPolicy
policy = DummySecurityPolicy(False)
self.config.registry.registerUtility(policy, IAuthenticationPolicy)
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
def test_secured_view_authz_policy_no_authn_policy(self):
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = {}
from pyramid.interfaces import IAuthorizationPolicy
policy = DummySecurityPolicy(False)
self.config.registry.registerUtility(policy, IAuthorizationPolicy)
result = self.config._derive_view(view, permission='view')
self.assertEqual(view.__module__, result.__module__)
self.assertEqual(view.__doc__, result.__doc__)
self.assertEqual(view.__name__, result.__name__)
self.assertFalse(hasattr(result, '__call_permissive__'))
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
self.assertEqual(result(None, request), response)
def test_secured_view_raises_forbidden_no_name(self):
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.interfaces import IAuthorizationPolicy
from pyramid.httpexceptions import HTTPForbidden
response = DummyResponse()
view = lambda *arg: response
self.config.registry.settings = {}
policy = DummySecurityPolicy(False)
self.config.registry.registerUtility(policy, IAuthenticationPolicy)
self.config.registry.registerUtility(policy, IAuthorizationPolicy)
result = self.config._derive_view(view, permission='view')
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
try:
result(None, request)
except HTTPForbidden as e:
self.assertEqual(e.message,
'Unauthorized: <lambda> failed permission check')
else: # pragma: no cover
raise AssertionError
def test_secured_view_raises_forbidden_with_name(self):
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.interfaces import IAuthorizationPolicy
from pyramid.httpexceptions import HTTPForbidden
def myview(request): pass
self.config.registry.settings = {}
policy = DummySecurityPolicy(False)
self.config.registry.registerUtility(policy, IAuthenticationPolicy)
self.config.registry.registerUtility(policy, IAuthorizationPolicy)
result = self.config._derive_view(myview, permission='view')
request = self._makeRequest()
request.view_name = 'view_name'
request.url = 'url'
try:
result(None, request)
except HTTPForbidden as e:
self.assertEqual(e.message,
'Unauthorized: myview failed permission check')
else: # pragma: no cover
raise AssertionError
def test_secured_view_skipped_by_default_on_exception_view(self):
from pyramid.request import Request
from pyramid.security import NO_PERMISSION_REQUIRED
def view(request):
raise ValueError
def excview(request):
return 'hello'
self._registerSecurityPolicy(False)
self.config.add_settings({'debug_authorization': True})
self.config.set_default_permission('view')
self.config.add_view(view, name='foo', permission=NO_PERMISSION_REQUIRED)
self.config.add_view(excview, context=ValueError, renderer='string')
app = self.config.make_wsgi_app()
request = Request.blank('/foo', base_url='http://example.com')
request.method = 'POST'
response = request.get_response(app)
self.assertTrue(b'hello' in response.body)
def test_secured_view_failed_on_explicit_exception_view(self):
from pyramid.httpexceptions import HTTPForbidden
from pyramid.request import Request
from pyramid.security import NO_PERMISSION_REQUIRED
def view(request):
raise ValueError
def excview(request): pass
self._registerSecurityPolicy(False)
self.config.add_view(view, name='foo', permission=NO_PERMISSION_REQUIRED)
self.config.add_view(excview, context=ValueError, renderer='string',
permission='view')
app = self.config.make_wsgi_app()
request = Request.blank('/foo', base_url='http://example.com')
request.method = 'POST'
try:
request.get_response(app)
except HTTPForbidden:
pass
else: # pragma: no cover
raise AssertionError
def test_secured_view_passed_on_explicit_exception_view(self):
from pyramid.request import Request
from pyramid.security import NO_PERMISSION_REQUIRED
def view(request):
raise ValueError
def excview(request):
return 'hello'
self._registerSecurityPolicy(True)
self.config.add_view(view, name='foo', permission=NO_PERMISSION_REQUIRED)
self.config.add_view(excview, context=ValueError, renderer='string',
permission='view')
app = self.config.make_wsgi_app()
request = Request.blank('/foo', base_url='http://example.com')
request.method = 'POST'
request.headers['X-CSRF-Token'] = 'foo'
response = request.get_response(app)
self.assertTrue(b'hello' in response.body)
def test_predicate_mismatch_view_has_no_name(self):
from pyramid.exceptions import PredicateMismatch
response = DummyResponse()
view = lambda *arg: response
def predicate1(context, request):
return False
predicate1.text = lambda *arg: 'text'
result = self.config._derive_view(view, predicates=[predicate1])
request = self._makeRequest()
request.method = 'POST'
try:
result(None, None)
except PredicateMismatch as e:
self.assertEqual(e.detail,
'predicate mismatch for view <lambda> (text)')
else: # pragma: no cover
raise AssertionError
def test_predicate_mismatch_view_has_name(self):
from pyramid.exceptions import PredicateMismatch
def myview(request): pass
def predicate1(context, request):
return False
predicate1.text = lambda *arg: 'text'
result = self.config._derive_view(myview, predicates=[predicate1])
request = self._makeRequest()
request.method = 'POST'
try:
result(None, None)
except PredicateMismatch as e:
self.assertEqual(e.detail,
'predicate mismatch for view myview (text)')
else: # pragma: no cover
raise AssertionError
def test_predicate_mismatch_exception_has_text_in_detail(self):
from pyramid.exceptions import PredicateMismatch
def myview(request): pass
def predicate1(context, request):
return True
predicate1.text = lambda *arg: 'pred1'
def predicate2(context, request):
return False
predicate2.text = lambda *arg: 'pred2'
result = self.config._derive_view(myview,
predicates=[predicate1, predicate2])
request = self._makeRequest()
request.method = 'POST'
try:
result(None, None)
except PredicateMismatch as e:
self.assertEqual(e.detail,
'predicate mismatch for view myview (pred2)')
else: # pragma: no cover
raise AssertionError
def test_with_predicates_all(self):
response = DummyResponse()
view = lambda *arg: response
predicates = []
def predicate1(context, request):
predicates.append(True)
return True
def predicate2(context, request):
predicates.append(True)
return True
result = self.config._derive_view(view,
predicates=[predicate1, predicate2])
request = self._makeRequest()
request.method = 'POST'
next = result(None, None)
self.assertEqual(next, response)
self.assertEqual(predicates, [True, True])
def test_with_predicates_checker(self):
view = lambda *arg: 'OK'
predicates = []
def predicate1(context, request):
predicates.append(True)
return True
def predicate2(context, request):
predicates.append(True)
return True
result = self.config._derive_view(view,
predicates=[predicate1, predicate2])
request = self._makeRequest()
request.method = 'POST'
next = result.__predicated__(None, None)
self.assertEqual(next, True)
self.assertEqual(predicates, [True, True])
def test_with_predicates_notall(self):
from pyramid.httpexceptions import HTTPNotFound
view = lambda *arg: 'OK'
predicates = []
def predicate1(context, request):
predicates.append(True)
return True
predicate1.text = lambda *arg: 'text'
def predicate2(context, request):
predicates.append(True)
return False
predicate2.text = lambda *arg: 'text'
result = self.config._derive_view(view,
predicates=[predicate1, predicate2])
request = self._makeRequest()
request.method = 'POST'
self.assertRaises(HTTPNotFound, result, None, None)
self.assertEqual(predicates, [True, True])
def test_with_wrapper_viewname(self):
from pyramid.response import Response
from pyramid.interfaces import IView
from pyramid.interfaces import IViewClassifier
inner_response = Response('OK')
def inner_view(context, request):
return inner_response
def outer_view(context, request):
self.assertEqual(request.wrapped_response, inner_response)
self.assertEqual(request.wrapped_body, inner_response.body)
self.assertEqual(request.wrapped_view.__original_view__,
inner_view)
return Response(b'outer ' + request.wrapped_body)
self.config.registry.registerAdapter(
outer_view, (IViewClassifier, None, None), IView, 'owrap')
result = self.config._derive_view(inner_view, viewname='inner',
wrapper_viewname='owrap')
self.assertFalse(result is inner_view)
self.assertEqual(inner_view.__module__, result.__module__)
self.assertEqual(inner_view.__doc__, result.__doc__)
request = self._makeRequest()
response = result(None, request)
self.assertEqual(response.body, b'outer OK')
def test_with_wrapper_viewname_notfound(self):
from pyramid.response import Response
inner_response = Response('OK')
def inner_view(context, request):
return inner_response
wrapped = self.config._derive_view(inner_view, viewname='inner',
wrapper_viewname='owrap')
request = self._makeRequest()
self.assertRaises(ValueError, wrapped, None, request)
def test_as_newstyle_class_context_and_request_attr_and_renderer(self):
response = DummyResponse()
class renderer(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, {'a':'1'})
self.assertEqual(view_inst.__class__, View)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
class View(object):
def __init__(self, context, request):
pass
def index(self):
return {'a':'1'}
result = self.config._derive_view(View,
renderer=renderer(), attr='index')
self.assertFalse(result is View)
self.assertEqual(result.__module__, View.__module__)
self.assertEqual(result.__doc__, View.__doc__)
self.assertEqual(result.__name__, View.__name__)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_as_newstyle_class_requestonly_attr_and_renderer(self):
response = DummyResponse()
class renderer(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, {'a':'1'})
self.assertEqual(view_inst.__class__, View)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
class View(object):
def __init__(self, request):
pass
def index(self):
return {'a':'1'}
result = self.config.derive_view(View,
renderer=renderer(), attr='index')
self.assertFalse(result is View)
self.assertEqual(result.__module__, View.__module__)
self.assertEqual(result.__doc__, View.__doc__)
self.assertEqual(result.__name__, View.__name__)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_as_oldstyle_cls_context_request_attr_and_renderer(self):
response = DummyResponse()
class renderer(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, {'a':'1'})
self.assertEqual(view_inst.__class__, View)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
class View:
def __init__(self, context, request):
pass
def index(self):
return {'a':'1'}
result = self.config.derive_view(View,
renderer=renderer(), attr='index')
self.assertFalse(result is View)
self.assertEqual(result.__module__, View.__module__)
self.assertEqual(result.__doc__, View.__doc__)
self.assertEqual(result.__name__, View.__name__)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_as_oldstyle_cls_requestonly_attr_and_renderer(self):
response = DummyResponse()
class renderer(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, {'a':'1'})
self.assertEqual(view_inst.__class__, View)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
class View:
def __init__(self, request):
pass
def index(self):
return {'a':'1'}
result = self.config.derive_view(View,
renderer=renderer(), attr='index')
self.assertFalse(result is View)
self.assertEqual(result.__module__, View.__module__)
self.assertEqual(result.__doc__, View.__doc__)
self.assertEqual(result.__name__, View.__name__)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_as_instance_context_and_request_attr_and_renderer(self):
response = DummyResponse()
class renderer(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, {'a':'1'})
self.assertEqual(view_inst, view)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
class View:
def index(self, context, request):
return {'a':'1'}
view = View()
result = self.config.derive_view(view,
renderer=renderer(), attr='index')
self.assertFalse(result is view)
self.assertEqual(result.__module__, view.__module__)
self.assertEqual(result.__doc__, view.__doc__)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_as_instance_requestonly_attr_and_renderer(self):
response = DummyResponse()
class renderer(object):
def render_view(inself, req, resp, view_inst, ctx):
self.assertEqual(req, request)
self.assertEqual(resp, {'a':'1'})
self.assertEqual(view_inst, view)
self.assertEqual(ctx, context)
return response
def clone(self):
return self
class View:
def index(self, request):
return {'a':'1'}
view = View()
result = self.config.derive_view(view,
renderer=renderer(), attr='index')
self.assertFalse(result is view)
self.assertEqual(result.__module__, view.__module__)
self.assertEqual(result.__doc__, view.__doc__)
request = self._makeRequest()
context = testing.DummyResource()
self.assertEqual(result(context, request), response)
def test_with_view_mapper_config_specified(self):
response = DummyResponse()
class mapper(object):
def __init__(self, **kw):
self.kw = kw
def __call__(self, view):
def wrapped(context, request):
return response
return wrapped
def view(context, request): return 'NOTOK'
result = self.config._derive_view(view, mapper=mapper)
self.assertFalse(result.__wraps__ is view)
self.assertEqual(result(None, None), response)
def test_with_view_mapper_view_specified(self):
from pyramid.response import Response
response = Response()
def mapper(**kw):
def inner(view):
def superinner(context, request):
self.assertEqual(request, None)
return response
return superinner
return inner
def view(context, request): return 'NOTOK'
view.__view_mapper__ = mapper
result = self.config.derive_view(view)
self.assertFalse(result.__wraps__ is view)
self.assertEqual(result(None, None), response)
def test_with_view_mapper_default_mapper_specified(self):
from pyramid.response import Response
response = Response()
def mapper(**kw):
def inner(view):
def superinner(context, request):
self.assertEqual(request, None)
return response
return superinner
return inner
self.config.set_view_mapper(mapper)
def view(context, request): return 'NOTOK'
result = self.config.derive_view(view)
self.assertFalse(result.__wraps__ is view)
self.assertEqual(result(None, None), response)
def test_attr_wrapped_view_branching_default_phash(self):
from pyramid.config.util import DEFAULT_PHASH
def view(context, request): pass
result = self.config._derive_view(view, phash=DEFAULT_PHASH)
self.assertEqual(result.__wraps__, view)
def test_attr_wrapped_view_branching_nondefault_phash(self):
def view(context, request): pass
result = self.config._derive_view(view, phash='nondefault')
self.assertNotEqual(result, view)
def test_http_cached_view_integer(self):
import datetime
from pyramid.response import Response
response = Response('OK')
def inner_view(context, request):
return response
result = self.config._derive_view(inner_view, http_cache=3600)
self.assertFalse(result is inner_view)
self.assertEqual(inner_view.__module__, result.__module__)
self.assertEqual(inner_view.__doc__, result.__doc__)
request = self._makeRequest()
when = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
result = result(None, request)
self.assertEqual(result, response)
headers = dict(result.headerlist)
expires = parse_httpdate(headers['Expires'])
assert_similar_datetime(expires, when)
self.assertEqual(headers['Cache-Control'], 'max-age=3600')
def test_http_cached_view_timedelta(self):
import datetime
from pyramid.response import Response
response = Response('OK')
def inner_view(context, request):
return response
result = self.config._derive_view(inner_view,
http_cache=datetime.timedelta(hours=1))
self.assertFalse(result is inner_view)
self.assertEqual(inner_view.__module__, result.__module__)
self.assertEqual(inner_view.__doc__, result.__doc__)
request = self._makeRequest()
when = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
result = result(None, request)
self.assertEqual(result, response)
headers = dict(result.headerlist)
expires = parse_httpdate(headers['Expires'])
assert_similar_datetime(expires, when)
self.assertEqual(headers['Cache-Control'], 'max-age=3600')
def test_http_cached_view_tuple(self):
import datetime
from pyramid.response import Response
response = Response('OK')
def inner_view(context, request):
return response
result = self.config._derive_view(inner_view,
http_cache=(3600, {'public':True}))
self.assertFalse(result is inner_view)
self.assertEqual(inner_view.__module__, result.__module__)
self.assertEqual(inner_view.__doc__, result.__doc__)
request = self._makeRequest()
when = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
result = result(None, request)
self.assertEqual(result, response)
headers = dict(result.headerlist)
expires = parse_httpdate(headers['Expires'])
assert_similar_datetime(expires, when)
self.assertEqual(headers['Cache-Control'], 'max-age=3600, public')
def test_http_cached_view_tuple_seconds_None(self):
from pyramid.response import Response
response = Response('OK')
def inner_view(context, request):
return response
result = self.config._derive_view(inner_view,
http_cache=(None, {'public':True}))
self.assertFalse(result is inner_view)
self.assertEqual(inner_view.__module__, result.__module__)
self.assertEqual(inner_view.__doc__, result.__doc__)
request = self._makeRequest()
result = result(None, request)
self.assertEqual(result, response)
headers = dict(result.headerlist)
self.assertFalse('Expires' in headers)
self.assertEqual(headers['Cache-Control'], 'public')
def test_http_cached_view_prevent_auto_set(self):
from pyramid.response import Response
response = Response()
response.cache_control.prevent_auto = True
def inner_view(context, request):
return response
result = self.config._derive_view(inner_view, http_cache=3600)
request = self._makeRequest()
result = result(None, request)
self.assertEqual(result, response) # doesn't blow up
headers = dict(result.headerlist)
self.assertFalse('Expires' in headers)
self.assertFalse('Cache-Control' in headers)
def test_http_cached_prevent_http_cache_in_settings(self):
self.config.registry.settings['prevent_http_cache'] = True
from pyramid.response import Response
response = Response()
def inner_view(context, request):
return response
result = self.config._derive_view(inner_view, http_cache=3600)
request = self._makeRequest()
result = result(None, request)
self.assertEqual(result, response)
headers = dict(result.headerlist)
self.assertFalse('Expires' in headers)
self.assertFalse('Cache-Control' in headers)
def test_http_cached_view_bad_tuple(self):
def view(request): pass
self.assertRaises(ConfigurationError, self.config._derive_view,
view, http_cache=(None,))
def test_csrf_view_ignores_GET(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.method = 'GET'
view = self.config._derive_view(inner_view, require_csrf=True)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_fails_with_bad_POST_header(self):
from pyramid.exceptions import BadCSRFToken
def inner_view(request): pass
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.headers = {'X-CSRF-Token': 'bar'}
view = self.config._derive_view(inner_view, require_csrf=True)
self.assertRaises(BadCSRFToken, lambda: view(None, request))
def test_csrf_view_passes_with_good_POST_header(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.headers = {'X-CSRF-Token': 'foo'}
view = self.config._derive_view(inner_view, require_csrf=True)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_fails_with_bad_POST_token(self):
from pyramid.exceptions import BadCSRFToken
def inner_view(request): pass
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.POST = {'csrf_token': 'bar'}
view = self.config._derive_view(inner_view, require_csrf=True)
self.assertRaises(BadCSRFToken, lambda: view(None, request))
def test_csrf_view_passes_with_good_POST_token(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.POST = {'csrf_token': 'foo'}
view = self.config._derive_view(inner_view, require_csrf=True)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_https_domain(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "https"
request.domain = "example.com"
request.host_port = "443"
request.referrer = "https://example.com/login/"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.POST = {'csrf_token': 'foo'}
view = self.config._derive_view(inner_view, require_csrf=True)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_fails_on_bad_PUT_header(self):
from pyramid.exceptions import BadCSRFToken
def inner_view(request): pass
request = self._makeRequest()
request.scheme = "http"
request.method = 'PUT'
request.session = DummySession({'csrf_token': 'foo'})
request.headers = {'X-CSRF-Token': 'bar'}
view = self.config._derive_view(inner_view, require_csrf=True)
self.assertRaises(BadCSRFToken, lambda: view(None, request))
def test_csrf_view_fails_on_bad_referrer(self):
from pyramid.exceptions import BadCSRFOrigin
def inner_view(request): pass
request = self._makeRequest()
request.method = "POST"
request.scheme = "https"
request.host_port = "443"
request.domain = "example.com"
request.referrer = "https://not-example.com/evil/"
request.registry.settings = {}
view = self.config._derive_view(inner_view, require_csrf=True)
self.assertRaises(BadCSRFOrigin, lambda: view(None, request))
def test_csrf_view_fails_on_bad_origin(self):
from pyramid.exceptions import BadCSRFOrigin
def inner_view(request): pass
request = self._makeRequest()
request.method = "POST"
request.scheme = "https"
request.host_port = "443"
request.domain = "example.com"
request.headers = {"Origin": "https://not-example.com/evil/"}
request.registry.settings = {}
view = self.config._derive_view(inner_view, require_csrf=True)
self.assertRaises(BadCSRFOrigin, lambda: view(None, request))
def test_csrf_view_enabled_by_default(self):
from pyramid.exceptions import BadCSRFToken
def inner_view(request): pass
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
self.config.set_default_csrf_options(require_csrf=True)
view = self.config._derive_view(inner_view)
self.assertRaises(BadCSRFToken, lambda: view(None, request))
def test_csrf_view_enabled_via_callback(self):
def callback(request):
return True
from pyramid.exceptions import BadCSRFToken
def inner_view(request): pass
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
self.config.set_default_csrf_options(require_csrf=True, callback=callback)
view = self.config._derive_view(inner_view)
self.assertRaises(BadCSRFToken, lambda: view(None, request))
def test_csrf_view_disabled_via_callback(self):
def callback(request):
return False
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
self.config.set_default_csrf_options(require_csrf=True, callback=callback)
view = self.config._derive_view(inner_view)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_uses_custom_csrf_token(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.POST = {'DUMMY': 'foo'}
self.config.set_default_csrf_options(require_csrf=True, token='DUMMY')
view = self.config._derive_view(inner_view)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_uses_custom_csrf_header(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.headers = {'DUMMY': 'foo'}
self.config.set_default_csrf_options(require_csrf=True, header='DUMMY')
view = self.config._derive_view(inner_view)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_uses_custom_methods(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'PUT'
request.session = DummySession({'csrf_token': 'foo'})
self.config.set_default_csrf_options(
require_csrf=True, safe_methods=['PUT'])
view = self.config._derive_view(inner_view)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_uses_view_option_override(self):
response = DummyResponse()
def inner_view(request):
return response
request = self._makeRequest()
request.scheme = "http"
request.method = 'POST'
request.session = DummySession({'csrf_token': 'foo'})
request.POST = {'csrf_token': 'bar'}
self.config.set_default_csrf_options(require_csrf=True)
view = self.config._derive_view(inner_view, require_csrf=False)
result = view(None, request)
self.assertTrue(result is response)
def test_csrf_view_skipped_by_default_on_exception_view(self):
from pyramid.request import Request
def view(request):
raise ValueError
def excview(request):
return 'hello'
self.config.set_default_csrf_options(require_csrf=True)
self.config.set_session_factory(
lambda request: DummySession({'csrf_token': 'foo'}))
self.config.add_view(view, name='foo', require_csrf=False)
self.config.add_view(excview, context=ValueError, renderer='string')
app = self.config.make_wsgi_app()
request = Request.blank('/foo', base_url='http://example.com')
request.method = 'POST'
response = request.get_response(app)
self.assertTrue(b'hello' in response.body)
def test_csrf_view_failed_on_explicit_exception_view(self):
from pyramid.exceptions import BadCSRFToken
from pyramid.request import Request
def view(request):
raise ValueError
def excview(request): pass
self.config.set_default_csrf_options(require_csrf=True)
self.config.set_session_factory(
lambda request: DummySession({'csrf_token': 'foo'}))
self.config.add_view(view, name='foo', require_csrf=False)
self.config.add_view(excview, context=ValueError, renderer='string',
require_csrf=True)
app = self.config.make_wsgi_app()
request = Request.blank('/foo', base_url='http://example.com')
request.method = 'POST'
try:
request.get_response(app)
except BadCSRFToken:
pass
else: # pragma: no cover
raise AssertionError
def test_csrf_view_passed_on_explicit_exception_view(self):
from pyramid.request import Request
def view(request):
raise ValueError
def excview(request):
return 'hello'
self.config.set_default_csrf_options(require_csrf=True)
self.config.set_session_factory(
lambda request: DummySession({'csrf_token': 'foo'}))
self.config.add_view(view, name='foo', require_csrf=False)
self.config.add_view(excview, context=ValueError, renderer='string',
require_csrf=True)
app = self.config.make_wsgi_app()
request = Request.blank('/foo', base_url='http://example.com')
request.method = 'POST'
request.headers['X-CSRF-Token'] = 'foo'
response = request.get_response(app)
self.assertTrue(b'hello' in response.body)
class TestDerivationOrder(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
self.config = None
testing.tearDown()
def test_right_order_user_sorted(self):
from pyramid.interfaces import IViewDerivers
self.config.add_view_deriver(None, 'deriv1')
self.config.add_view_deriver(None, 'deriv2', 'decorated_view', 'deriv1')
self.config.add_view_deriver(None, 'deriv3', 'deriv2', 'deriv1')
derivers = self.config.registry.getUtility(IViewDerivers)
derivers_sorted = derivers.sorted()
dlist = [d for (d, _) in derivers_sorted]
self.assertEqual([
'secured_view',
'csrf_view',
'owrapped_view',
'http_cached_view',
'decorated_view',
'deriv2',
'deriv3',
'deriv1',
'rendered_view',
'mapped_view',
], dlist)
def test_right_order_implicit(self):
from pyramid.interfaces import IViewDerivers
self.config.add_view_deriver(None, 'deriv1')
self.config.add_view_deriver(None, 'deriv2')
self.config.add_view_deriver(None, 'deriv3')
derivers = self.config.registry.getUtility(IViewDerivers)
derivers_sorted = derivers.sorted()
dlist = [d for (d, _) in derivers_sorted]
self.assertEqual([
'secured_view',
'csrf_view',
'owrapped_view',
'http_cached_view',
'decorated_view',
'deriv3',
'deriv2',
'deriv1',
'rendered_view',
'mapped_view',
], dlist)
def test_right_order_under_rendered_view(self):
from pyramid.interfaces import IViewDerivers
self.config.add_view_deriver(None, 'deriv1', 'rendered_view', 'mapped_view')
derivers = self.config.registry.getUtility(IViewDerivers)
derivers_sorted = derivers.sorted()
dlist = [d for (d, _) in derivers_sorted]
self.assertEqual([
'secured_view',
'csrf_view',
'owrapped_view',
'http_cached_view',
'decorated_view',
'rendered_view',
'deriv1',
'mapped_view',
], dlist)
def test_right_order_under_rendered_view_others(self):
from pyramid.interfaces import IViewDerivers
self.config.add_view_deriver(None, 'deriv1', 'rendered_view', 'mapped_view')
self.config.add_view_deriver(None, 'deriv2')
self.config.add_view_deriver(None, 'deriv3')
derivers = self.config.registry.getUtility(IViewDerivers)
derivers_sorted = derivers.sorted()
dlist = [d for (d, _) in derivers_sorted]
self.assertEqual([
'secured_view',
'csrf_view',
'owrapped_view',
'http_cached_view',
'decorated_view',
'deriv3',
'deriv2',
'rendered_view',
'deriv1',
'mapped_view',
], dlist)
class TestAddDeriver(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
self.config = None
testing.tearDown()
def test_add_single_deriver(self):
response = DummyResponse()
response.deriv = False
view = lambda *arg: response
def deriv(view, info):
self.assertFalse(response.deriv)
response.deriv = True
return view
result = self.config._derive_view(view)
self.assertFalse(response.deriv)
self.config.add_view_deriver(deriv, 'test_deriv')
result = self.config._derive_view(view)
self.assertTrue(response.deriv)
def test_override_deriver(self):
flags = {}
class AView:
def __init__(self):
self.response = DummyResponse()
def deriv1(view, info):
flags['deriv1'] = True
return view
def deriv2(view, info):
flags['deriv2'] = True
return view
view1 = AView()
self.config.add_view_deriver(deriv1, 'test_deriv')
result = self.config._derive_view(view1)
self.assertTrue(flags.get('deriv1'))
self.assertFalse(flags.get('deriv2'))
flags.clear()
view2 = AView()
self.config.add_view_deriver(deriv2, 'test_deriv')
result = self.config._derive_view(view2)
self.assertFalse(flags.get('deriv1'))
self.assertTrue(flags.get('deriv2'))
def test_override_mapped_view(self):
from pyramid.viewderivers import VIEW
response = DummyResponse()
view = lambda *arg: response
flags = {}
def deriv1(view, info):
flags['deriv1'] = True
return view
result = self.config._derive_view(view)
self.assertFalse(flags.get('deriv1'))
flags.clear()
self.config.add_view_deriver(
deriv1, name='mapped_view', under='rendered_view', over=VIEW)
result = self.config._derive_view(view)
self.assertTrue(flags.get('deriv1'))
def test_add_multi_derivers_ordered(self):
from pyramid.viewderivers import INGRESS
response = DummyResponse()
view = lambda *arg: response
response.deriv = []
def deriv1(view, info):
response.deriv.append('deriv1')
return view
def deriv2(view, info):
response.deriv.append('deriv2')
return view
def deriv3(view, info):
response.deriv.append('deriv3')
return view
self.config.add_view_deriver(deriv1, 'deriv1')
self.config.add_view_deriver(deriv2, 'deriv2', INGRESS, 'deriv1')
self.config.add_view_deriver(deriv3, 'deriv3', 'deriv2', 'deriv1')
result = self.config._derive_view(view)
self.assertEqual(response.deriv, ['deriv1', 'deriv3', 'deriv2'])
def test_add_deriver_without_name(self):
from pyramid.interfaces import IViewDerivers
def deriv1(view, info): pass
self.config.add_view_deriver(deriv1)
derivers = self.config.registry.getUtility(IViewDerivers)
self.assertTrue('deriv1' in derivers.names)
def test_add_deriver_reserves_ingress(self):
from pyramid.exceptions import ConfigurationError
from pyramid.viewderivers import INGRESS
def deriv1(view, info): pass
self.assertRaises(
ConfigurationError, self.config.add_view_deriver, deriv1, INGRESS)
def test_add_deriver_enforces_ingress_is_first(self):
from pyramid.exceptions import ConfigurationError
from pyramid.viewderivers import INGRESS
def deriv1(view, info): pass
try:
self.config.add_view_deriver(deriv1, over=INGRESS)
except ConfigurationError as ex:
self.assertTrue('cannot be over INGRESS' in ex.args[0])
else: # pragma: no cover
raise AssertionError
def test_add_deriver_enforces_view_is_last(self):
from pyramid.exceptions import ConfigurationError
from pyramid.viewderivers import VIEW
def deriv1(view, info): pass
try:
self.config.add_view_deriver(deriv1, under=VIEW)
except ConfigurationError as ex:
self.assertTrue('cannot be under VIEW' in ex.args[0])
else: # pragma: no cover
raise AssertionError
def test_add_deriver_enforces_mapped_view_is_last(self):
from pyramid.exceptions import ConfigurationError
def deriv1(view, info): pass
try:
self.config.add_view_deriver(deriv1, 'deriv1', under='mapped_view')
except ConfigurationError as ex:
self.assertTrue('cannot be under "mapped_view"' in ex.args[0])
else: # pragma: no cover
raise AssertionError
class TestDeriverIntegration(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
self.config = None
testing.tearDown()
def _getViewCallable(self, config, ctx_iface=None, request_iface=None,
name=''):
from zope.interface import Interface
from pyramid.interfaces import IRequest
from pyramid.interfaces import IView
from pyramid.interfaces import IViewClassifier
classifier = IViewClassifier
if ctx_iface is None:
ctx_iface = Interface
if request_iface is None:
request_iface = IRequest
return config.registry.adapters.lookup(
(classifier, request_iface, ctx_iface), IView, name=name,
default=None)
def _makeRequest(self, config):
request = DummyRequest()
request.registry = config.registry
return request
def test_view_options(self):
response = DummyResponse()
view = lambda *arg: response
response.deriv = []
def deriv1(view, info):
response.deriv.append(info.options['deriv1'])
return view
deriv1.options = ('deriv1',)
def deriv2(view, info):
response.deriv.append(info.options['deriv2'])
return view
deriv2.options = ('deriv2',)
self.config.add_view_deriver(deriv1, 'deriv1')
self.config.add_view_deriver(deriv2, 'deriv2')
self.config.add_view(view, deriv1='test1', deriv2='test2')
wrapper = self._getViewCallable(self.config)
request = self._makeRequest(self.config)
request.method = 'GET'
self.assertEqual(wrapper(None, request), response)
self.assertEqual(['test1', 'test2'], response.deriv)
def test_unexpected_view_options(self):
from pyramid.exceptions import ConfigurationError
def deriv1(view, info): pass
self.config.add_view_deriver(deriv1, 'deriv1')
self.assertRaises(
ConfigurationError,
lambda: self.config.add_view(lambda r: {}, deriv1='test1'))
@implementer(IResponse)
class DummyResponse(object):
content_type = None
default_content_type = None
body = None
class DummyRequest:
subpath = ()
matchdict = None
request_iface = IRequest
def __init__(self, environ=None):
if environ is None:
environ = {}
self.environ = environ
self.params = {}
self.POST = {}
self.cookies = {}
self.headers = {}
self.response = DummyResponse()
class DummyLogger:
def __init__(self):
self.messages = []
def info(self, msg):
self.messages.append(msg)
warn = info
debug = info
class DummySecurityPolicy:
def __init__(self, permitted=True):
self.permitted = permitted
def effective_principals(self, request):
return []
def permits(self, context, principals, permission):
return self.permitted
class DummySession(dict):
def get_csrf_token(self):
return self['csrf_token']
def parse_httpdate(s):
import datetime
# cannot use %Z, must use literal GMT; Jython honors timezone
# but CPython does not
return datetime.datetime.strptime(s, "%a, %d %b %Y %H:%M:%S GMT")
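# Example (hypothetical input): parse_httpdate("Tue, 15 Nov 1994 08:12:31 GMT")
# returns the corresponding naive datetime; the Expires headers checked in the
# http_cache tests above use this RFC 1123 style format.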
def assert_similar_datetime(one, two):
for attr in ('year', 'month', 'day', 'hour', 'minute'):
one_attr = getattr(one, attr)
two_attr = getattr(two, attr)
if not one_attr == two_attr: # pragma: no cover
raise AssertionError('%r != %r in %s' % (one_attr, two_attr, attr))
| 40.632312
| 84
| 0.633167
|
2eb1aa285d90f9682ed0aaf0c389501fd4f045ee
| 971
|
py
|
Python
|
blog/urls.py
|
samiksha-patil/Knowledge-Sharing-Platform
|
22e61a659d5ad63fe656fa639dc897cbdebad4fe
|
[
"bzip2-1.0.6"
] | 1
|
2021-05-09T08:18:49.000Z
|
2021-05-09T08:18:49.000Z
|
blog/urls.py
|
samiksha-patil/Knowledge-Sharing-Platform
|
22e61a659d5ad63fe656fa639dc897cbdebad4fe
|
[
"bzip2-1.0.6"
] | 9
|
2021-03-19T01:11:35.000Z
|
2022-03-12T00:20:13.000Z
|
blog/urls.py
|
samiksha-patil/Knowledge-Sharing-Platform
|
22e61a659d5ad63fe656fa639dc897cbdebad4fe
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.urls import path
from . import views
from django.conf.urls import url
from .views import PostListView,PostDetailView,PostCreateView,PostUpdateView,PostDeleteView,UserPostListView,UserAnswerListView
urlpatterns = [
path('',PostListView.as_view(),name='blog-home'),
path('post/<int:pk>/',PostDetailView.as_view(),name='post-detail'),
path('post/new/',PostCreateView.as_view(),name='post-create'),
path('post/<int:pk>/update/',PostUpdateView.as_view(),name='post-update'),
path('post/<int:pk>/delete/',PostDeleteView.as_view(),name='post-delete'),
path('user/<str:username>',UserPostListView.as_view(),name='user-posts'),
# path('post/<int:pk>/answer/',AnswerCreateView.as_view(), name='add_answer_to_post'),
path('post/<int:pk>/answer/', views.add_answer_to_post, name='add_answer_to_post'),
path('answer/<str:username>',UserAnswerListView.as_view(),name='user-answer'),
path('prof/<int:pk>',views.prof,name='user-prof'),
]
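# Illustrative lookups for the named routes above (assuming this URLconf is
# included at the project root; the values below are hypothetical):
#   reverse('post-detail', kwargs={'pk': 1})            ->  '/post/1/'
#   reverse('user-posts', kwargs={'username': 'alice'}) ->  '/user/alice'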
| 44.136364
| 127
| 0.729145
|
67185af64e290521a0643af8efc9fd89c4e3e344
| 1,531
|
py
|
Python
|
sa/profiles/DLink/DVG/ping.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/profiles/DLink/DVG/ping.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/profiles/DLink/DVG/ping.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# DLink.DVG.ping
# ---------------------------------------------------------------------
# Copyright (C) 2007-2011 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.iping import IPing
class Script(BaseScript):
name = "DLink.DVG.ping"
interface = IPing
rx_result = re.compile(
r"^(?P<count>\d+) packets transmitted, (?P<success>\d+) "
r"(packets received|received), \d+% packet loss$",
re.MULTILINE,
)
rx_stat = re.compile(
r"^round-trip min/avg/max = (?P<min>.+)/(?P<avg>.+)/(?P<max>.+) ms$", re.MULTILINE
)
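    # Hypothetical device output that the two expressions above are written to
    # match, e.g.:
    #   5 packets transmitted, 5 packets received, 0% packet loss
    #   round-trip min/avg/max = 1.2/1.5/2.0 ms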
def execute(self, address, count=None, source_address=None, size=None, df=None):
cmd = "ping %s" % address
if count:
cmd += " %d" % int(count)
else:
cmd += " 5"
if size:
cmd += " %d" % int(size)
else:
cmd += " 64"
ping = self.cli(cmd)
result = self.rx_result.search(ping)
if result:
r = {"success": result.group("success"), "count": result.group("count")}
else:
raise self.NotSupportedError()
stat = self.rx_stat.search(ping)
if stat:
r.update({"min": stat.group("min"), "avg": stat.group("avg"), "max": stat.group("max")})
return r
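# When the statistics line is present, the mapping returned by execute() above
# looks roughly like (values are the matched strings):
#   {"count": "5", "success": "5", "min": "1.2", "avg": "1.5", "max": "2.0"}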
| 31.244898
| 100
| 0.472894
|
9a129b601df40ff4df214bf709dcf1c690cd8c54
| 13,619
|
py
|
Python
|
appion/bin/pyace2.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
appion/bin/pyace2.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
appion/bin/pyace2.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 1
|
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
#!/usr/bin/env python
#pythonlib
import os
import re
import sys
import math
import time
import glob
import numpy
import shutil
import subprocess
#appion
from appionlib import apFile
from appionlib import apParam
from appionlib import apImage
from appionlib import apDisplay
from appionlib import apDatabase
from appionlib import appiondata
from appionlib import appionLoop2
from appionlib import apInstrument
from appionlib.apCtf import ctfdb
from appionlib.apCtf import ctfinsert
# other myami
from pyami import mrc, primefactor, imagefun
class Ace2Loop(appionLoop2.AppionLoop):
"""
appion Loop function that
runs Craig's ace2 program
to estimate the CTF in images
"""
#======================
def setProcessingDirName(self):
self.processdirname = "ctf"
#======================
def preLoopFunctions(self):
self.powerspecdir = os.path.join(self.params['rundir'], "opimages")
apParam.createDirectory(self.powerspecdir, warning=False)
self.ace2exe = self.getACE2Path()
self.ctfrundata = None
return
#======================
def getACE2Path(self):
exename = 'ace2.exe'
ace2exe = subprocess.Popen("which "+exename, shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if not os.path.isfile(ace2exe):
ace2exe = os.path.join(apParam.getAppionDirectory(), 'bin', exename)
if not os.path.isfile(ace2exe):
apDisplay.printError(exename+" was not found at: "+apParam.getAppionDirectory())
return ace2exe
#======================
def postLoopFunctions(self):
pattern = os.path.join(self.params['rundir'], self.params['sessionname']+'*.corrected.mrc')
apFile.removeFilePattern(pattern)
ctfdb.printCtfSummary(self.params, self.imgtree)
#======================
def reprocessImage(self, imgdata):
"""
Returns
True, if an image should be reprocessed
False, if an image was processed and should NOT be reprocessed
None, if image has not yet been processed
e.g. a confidence less than 80%
"""
if self.params['reprocess'] is None:
return True
ctfvalue = ctfdb.getBestCtfByResolution(imgdata, msg=False)
if ctfvalue is None:
return True
		if ctfvalue['confidence'] > self.params['reprocess']:
# small, unbinned images can give same defocus values for 1 & 2:
if self.params['bin'] == 1 or ctfvalue['defocus1'] != ctfvalue['defocus2']:
return False
return True
#======================
def processImage(self, imgdata):
self.ctfvalues = {}
bestdef = ctfdb.getBestCtfByResolution(imgdata, msg=True)
apix = apDatabase.getPixelSize(imgdata)
if (not (self.params['onepass'] and self.params['zeropass'])):
maskhighpass = False
ace2inputpath = os.path.join(imgdata['session']['image path'],imgdata['filename']+".mrc")
else:
maskhighpass = True
filterimg = apImage.maskHighPassFilter(imgdata['image'],apix,1,self.params['zeropass'],self.params['onepass'])
ace2inputpath = os.path.join(self.params['rundir'],imgdata['filename']+".mrc")
mrc.write(filterimg,ace2inputpath)
# make sure that the image is a square
dimx = imgdata['camera']['dimension']['x']
dimy = imgdata['camera']['dimension']['y']
if dimx != dimy:
dims = [dimx,dimy]
dims.sort()
apDisplay.printMsg("resizing image: %ix%i to %ix%i" % (dimx,dimy,dims[0],dims[0]))
mrcarray = apImage.mrcToArray(ace2inputpath,msg=False)
clippedmrc = apImage.frame_cut(mrcarray,[dims[0],dims[0]])
ace2inputpath = os.path.join(self.params['rundir'],imgdata['filename']+".mrc")
apImage.arrayToMrc(clippedmrc,ace2inputpath,msg=False)
### pad out image to speed up FFT calculations for non-standard image sizes
print "checking prime factor"
if primefactor.isGoodStack(dimx) is False:
goodsize = primefactor.getNextEvenPrime(dimx)
factor = float(goodsize) / float(dimx)
apDisplay.printMsg("padding image: %ix%i to %ix%i" % (dimx,dimy,dimx*factor,dimy*factor))
mrcarray = apImage.mrcToArray(ace2inputpath,msg=False)
# paddedmrc = imagefun.pad(mrcarray, None, factor)
paddedmrc = apImage.frame_constant(mrcarray, (dimx*factor,dimy*factor), cval=mrcarray.mean())
ace2inputpath = os.path.join(self.params['rundir'],imgdata['filename']+".mrc")
apImage.arrayToMrc(paddedmrc,ace2inputpath,msg=False)
inputparams = {
'input': ace2inputpath,
'cs': self.params['cs'],
'kv': imgdata['scope']['high tension']/1000.0,
'apix': apix,
'binby': self.params['bin'],
}
### make standard input for ACE 2
apDisplay.printMsg("Ace2 executable: "+self.ace2exe)
commandline = ( self.ace2exe
+ " -i " + str(inputparams['input'])
+ " -b " + str(inputparams['binby'])
+ " -c " + str(inputparams['cs'])
+ " -k " + str(inputparams['kv'])
+ " -a " + str(inputparams['apix'])
+ " -e " + str(self.params['edge_b'])+","+str(self.params['edge_t'])
+ " -r " + str(self.params['rotblur'])
+ "\n" )
### run ace2
apDisplay.printMsg("running ace2 at "+time.asctime())
apDisplay.printColor(commandline, "purple")
t0 = time.time()
if self.params['verbose'] is True:
ace2proc = subprocess.Popen(commandline, shell=True)
else:
aceoutf = open("ace2.out", "a")
aceerrf = open("ace2.err", "a")
ace2proc = subprocess.Popen(commandline, shell=True, stderr=aceerrf, stdout=aceoutf)
ace2proc.wait()
### check if ace2 worked
basename = os.path.basename(ace2inputpath)
imagelog = basename+".ctf.txt"
if not os.path.isfile(imagelog) and self.stats['count'] <= 1:
### ace2 always crashes on first image??? .fft_wisdom file??
time.sleep(1)
if self.params['verbose'] is True:
ace2proc = subprocess.Popen(commandline, shell=True)
else:
aceoutf = open("ace2.out", "a")
aceerrf = open("ace2.err", "a")
ace2proc = subprocess.Popen(commandline, shell=True, stderr=aceerrf, stdout=aceoutf)
ace2proc.wait()
if self.params['verbose'] is False:
aceoutf.close()
aceerrf.close()
if not os.path.isfile(imagelog):
lddcmd = "ldd "+self.ace2exe
lddproc = subprocess.Popen(lddcmd, shell=True)
lddproc.wait()
apDisplay.printError("ace2 did not run")
apDisplay.printMsg("ace2 completed in " + apDisplay.timeString(time.time()-t0))
### parse log file
self.ctfvalues = {
'cs': self.params['cs'],
'volts': imgdata['scope']['high tension'],
}
logf = open(imagelog, "r")
apDisplay.printMsg("reading log file %s"%(imagelog))
for line in logf:
sline = line.strip()
if re.search("^Final Defocus: ", sline):
### old ACE2
				apDisplay.printError("This old version of ACE2 has a bug in the astigmatism, please upgrade ACE2 now")
#parts = sline.split()
#self.ctfvalues['defocus1'] = float(parts[2])
#self.ctfvalues['defocus2'] = float(parts[3])
### convert to degrees
#self.ctfvalues['angle_astigmatism'] = math.degrees(float(parts[4]))
elif re.search("^Final Defocus \(m,m,deg\):", sline):
### new ACE2
apDisplay.printMsg("Reading new ACE2 defocus")
parts = sline.split()
#print parts
self.ctfvalues['defocus1'] = float(parts[3])
self.ctfvalues['defocus2'] = float(parts[4])
# ace2 defines negative angle from +x toward +y
self.ctfvalues['angle_astigmatism'] = -float(parts[5])
elif re.search("^Amplitude Contrast:",sline):
parts = sline.split()
self.ctfvalues['amplitude_contrast'] = float(parts[2])
elif re.search("^Confidence:",sline):
parts = sline.split()
self.ctfvalues['confidence'] = float(parts[1])
self.ctfvalues['confidence_d'] = float(parts[1])
logf.close()
### summary stats
apDisplay.printMsg("============")
avgdf = (self.ctfvalues['defocus1']+self.ctfvalues['defocus2'])/2.0
ampconst = 100.0*self.ctfvalues['amplitude_contrast']
pererror = 100.0 * (self.ctfvalues['defocus1']-self.ctfvalues['defocus2']) / avgdf
apDisplay.printMsg("Defocus: %.3f x %.3f um (%.2f percent astigmatism)"%
(self.ctfvalues['defocus1']*1.0e6, self.ctfvalues['defocus2']*1.0e6, pererror ))
apDisplay.printMsg("Angle astigmatism: %.2f degrees"%(self.ctfvalues['angle_astigmatism']))
apDisplay.printMsg("Amplitude contrast: %.2f percent"%(ampconst))
apDisplay.printColor("Final confidence: %.3f"%(self.ctfvalues['confidence']),'cyan')
### double check that the values are reasonable
if avgdf > self.params['maxdefocus'] or avgdf < self.params['mindefocus']:
apDisplay.printWarning("bad defocus estimate, not committing values to database")
self.badprocess = True
if ampconst < 0.0 or ampconst > 80.0:
apDisplay.printWarning("bad amplitude contrast, not committing values to database")
self.badprocess = True
if self.ctfvalues['confidence'] < 0.2:
apDisplay.printWarning("bad confidence value, not committing values to database")
self.badprocess = True
## create power spectra jpeg
mrcfile = imgdata['filename']+".mrc.edge.mrc"
if os.path.isfile(mrcfile):
jpegfile = os.path.join(self.powerspecdir, apDisplay.short(imgdata['filename'])+".jpg")
ps = apImage.mrcToArray(mrcfile,msg=False)
c = numpy.array(ps.shape)/2.0
ps[c[0]-0,c[1]-0] = ps.mean()
ps[c[0]-1,c[1]-0] = ps.mean()
ps[c[0]-0,c[1]-1] = ps.mean()
ps[c[0]-1,c[1]-1] = ps.mean()
#print "%.3f -- %.3f -- %.3f"%(ps.min(), ps.mean(), ps.max())
ps = numpy.log(ps+1.0)
ps = (ps-ps.mean())/ps.std()
cutoff = -2.0*ps.min()
ps = numpy.where(ps > cutoff, cutoff, ps)
cutoff = ps.mean()
ps = numpy.where(ps < cutoff, cutoff, ps)
#print "%.3f -- %.3f -- %.3f"%(ps.min(), ps.mean(), ps.max())
apImage.arrayToJpeg(ps, jpegfile, msg=False)
apFile.removeFile(mrcfile)
self.ctfvalues['graph3'] = jpegfile
otherfiles = glob.glob(imgdata['filename']+".*.txt")
### remove extra debugging files
for filename in otherfiles:
if filename[-9:] == ".norm.txt":
continue
elif filename[-8:] == ".ctf.txt":
continue
else:
apFile.removeFile(filename)
if maskhighpass and os.path.isfile(ace2inputpath):
apFile.removeFile(ace2inputpath)
return
#======================
def commitToDatabase(self, imgdata):
if self.ctfrundata is None:
self.insertRunData()
ctfinsert.validateAndInsertCTFData(imgdata, self.ctfvalues, self.ctfrundata, self.params['rundir'])
return True
#======================
def insertRunData(self):
paramq = appiondata.ApAce2ParamsData()
paramq['bin'] = self.params['bin']
paramq['reprocess'] = self.params['reprocess']
paramq['cs'] = self.params['cs']
paramq['stig'] = True
paramq['min_defocus'] = self.params['mindefocus']
paramq['max_defocus'] = self.params['maxdefocus']
paramq['edge_thresh'] = self.params['edge_t']
paramq['edge_blur'] = self.params['edge_b']
paramq['rot_blur'] = self.params['rotblur']
paramq['refine2d'] = self.params['refine2d']
paramq['onepass'] = self.params['onepass']
paramq['zeropass'] = self.params['zeropass']
runq=appiondata.ApAceRunData()
runq['name'] = self.params['runname']
runq['session'] = self.getSessionData()
runq['hidden'] = False
runq['path'] = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
runq['ace2_params'] = paramq
runq.insert()
self.ctfrundata = runq
#======================
def setupParserOptions(self):
### values
self.parser.add_option("-b", "--bin", dest="bin", type="int", default=1,
help="Binning of the image before FFT", metavar="#")
self.parser.add_option("--mindefocus", dest="mindefocus", type="float", default=0.1e-6,
help="Minimal acceptable defocus (in meters)", metavar="#")
self.parser.add_option("--maxdefocus", dest="maxdefocus", type="float", default=15e-6,
help="Maximal acceptable defocus (in meters)", metavar="#")
self.parser.add_option("--edge1", dest="edge_b", type="float", default=12.0,
help="Canny edge parameters Blur Sigma", metavar="#")
self.parser.add_option("--edge2", dest="edge_t", type="float", default=0.001,
			help="Canny edge parameters Edge Threshold (0.0-1.0)", metavar="#")
self.parser.add_option("--rotblur", dest="rotblur", type="float", default=0.0,
help="Rotational blur for low contrast CTF (in degrees), default 0", metavar="#")
### true/false
self.parser.add_option("--refine2d", dest="refine2d", default=False,
action="store_true", help="Refine the defocus after initial ACE with 2d cross-correlation")
self.parser.add_option("--verbose", dest="verbose", default=True,
action="store_true", help="Show all ace2 messages")
self.parser.add_option("--quiet", dest="verbose", default=True,
action="store_false", help="Hide all ace2 messages")
self.parser.add_option("--onepass", dest="onepass", type="float",
help="Mask High pass filter radius for end of gradient mask in Angstroms", metavar="FLOAT")
self.parser.add_option("--zeropass", dest="zeropass", type="float",
help="Mask High pass filter radius for zero mask in Angstroms", metavar="FLOAT")
#self.parser.add_option("--refineapix", dest="refineapix", default=False,
# action="store_true", help="Refine the pixel size")
#======================
def checkConflicts(self):
if self.params['bin'] < 1:
apDisplay.printError("bin must be positive")
if (self.params['mindefocus'] is not None and
(self.params['mindefocus'] > 1e-3 or self.params['mindefocus'] < 1e-9)):
apDisplay.printError("min defocus is not in an acceptable range, e.g. mindefocus=1.5e-6")
if (self.params['maxdefocus'] is not None and
(self.params['maxdefocus'] > 1e-3 or self.params['maxdefocus'] < 1e-9)):
apDisplay.printError("max defocus is not in an acceptable range, e.g. maxdefocus=1.5e-6")
### set cs value
self.params['cs'] = apInstrument.getCsValueFromSession(self.getSessionData())
return
if __name__ == '__main__':
imgLoop = Ace2Loop()
imgLoop.run()
| 37.414835
| 113
| 0.677363
|
8228e15dc3fca8b2a2bca9452ab2b2c4b3132354
| 225
|
py
|
Python
|
media_management/media_management/doctype/film_element/test_film_element.py
|
ashish-greycube/media_management
|
1f295b8ca3c54c51f3be2b48d1976c94455767b0
|
[
"MIT"
] | null | null | null |
media_management/media_management/doctype/film_element/test_film_element.py
|
ashish-greycube/media_management
|
1f295b8ca3c54c51f3be2b48d1976c94455767b0
|
[
"MIT"
] | null | null | null |
media_management/media_management/doctype/film_element/test_film_element.py
|
ashish-greycube/media_management
|
1f295b8ca3c54c51f3be2b48d1976c94455767b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, GreyCube Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestFilmElement(unittest.TestCase):
pass
| 20.454545
| 60
| 0.773333
|
0533b0234631e0a72023c7824a3a4ac5fd6276e5
| 808
|
py
|
Python
|
medianeg.py
|
lessen/src
|
bc09a33d22e942214df5608806b11370e21ce7e8
|
[
"MIT"
] | null | null | null |
medianeg.py
|
lessen/src
|
bc09a33d22e942214df5608806b11370e21ce7e8
|
[
"MIT"
] | 1
|
2016-12-28T22:44:52.000Z
|
2016-12-28T22:44:52.000Z
|
medianeg.py
|
lessen/src
|
bc09a33d22e942214df5608806b11370e21ce7e8
|
[
"MIT"
] | null | null | null |
from median import median
from eg import eg
import time
from random import random as r
@eg
def _med():
    "pre-sorting lists is only really useful for large lists (over 10,000 items)"
assert median([1,2,3,4,5]) == 3
assert median([1,2,4,5]) == 3
for pow in range(2,7):
slow=fast=0
for i in range(100):
size=int(r()*10**pow)
if size < 1: continue
lst1 = [x for x in range(size)]
lst2 = sorted(lst1[:])
t1 = time.process_time()
n1 = median(lst1,ordered=False)
t2 = time.process_time()
n2 = median(lst2,ordered=True)
t3 = time.process_time()
assert n1==n2
fast += t3-t2
slow += t2-t1
print("size: %10s slow: %8.5f fast: %8.5f slow/fast: %8.5f " % (
10**pow,slow, fast, slow/fast))
if __name__ == "__main__": eg()
| 26.933333
| 76
| 0.591584
|
1bf623c80050edba6f7926bb1bf182f6216e54e0
| 470
|
py
|
Python
|
Aulasatualizada/execicio_curso em video/execicio_90_dicionario_em_python.py
|
swellington231/AulaPaython
|
7b72ddec4d85f4660c0c395de07a133993aa2c70
|
[
"MIT"
] | null | null | null |
Aulasatualizada/execicio_curso em video/execicio_90_dicionario_em_python.py
|
swellington231/AulaPaython
|
7b72ddec4d85f4660c0c395de07a133993aa2c70
|
[
"MIT"
] | null | null | null |
Aulasatualizada/execicio_curso em video/execicio_90_dicionario_em_python.py
|
swellington231/AulaPaython
|
7b72ddec4d85f4660c0c395de07a133993aa2c70
|
[
"MIT"
] | null | null | null |
aluno = dict()
aluno['nome'] = str(input('Nome: '))
aluno['média'] = float(input(f'Média do {aluno["nome"]}: '))
if aluno['média'] >= 7:
    aluno['Situação'] = 'Aprovado'
elif 5 <= aluno['média'] < 7:
    aluno['Situação'] = 'Recuperação'
else:
    aluno['Situação'] = 'Reprovado'
for k , v in aluno.items():
print(f'{k} é igual a {v}')
#
# print(f'O aluno {aluno["nome"]}')
# print(f'A média foi {aluno["média"]}')
# print(f'A Situação foi {aluno["Situação"]}')
| 27.647059
| 60
| 0.587234
|
3b118e1063e8bc7eaabd2d0c9af22a9146ed4883
| 135,948
|
py
|
Python
|
env/lib/python3.6/site-packages/autopep8.py
|
phoebekaranja/Pitch1
|
6f64b8a330db35af0462ebdb6517cd99da810baa
|
[
"MIT"
] | null | null | null |
env/lib/python3.6/site-packages/autopep8.py
|
phoebekaranja/Pitch1
|
6f64b8a330db35af0462ebdb6517cd99da810baa
|
[
"MIT"
] | 6
|
2020-06-05T19:04:55.000Z
|
2021-12-13T19:50:25.000Z
|
virtual/lib/python3.6/site-packages/autopep8.py
|
LoiseMwarangu/Instagram
|
1bb0791a69350e0b9a3d2864d7132c6a605360d7
|
[
"MIT"
] | 1
|
2019-01-17T07:17:31.000Z
|
2019-01-17T07:17:31.000Z
|
#!/usr/bin/env python
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pycodestyle should be added as methods to FixPEP8. See the
class documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
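# A minimal sketch of a global fix of the form fix_<code>(source) described in
# the module docstring above, kept as a comment and using a hypothetical code
# "X000" (the name and behaviour are illustrative assumptions, not part of
# autopep8's real fixer set):
#
#     def fix_x000(source):
#         """Strip trailing whitespace from every line (illustrative only)."""
#         return ''.join(line.rstrip() + '\n' for line in source.splitlines(True))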
import argparse
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import keyword
import locale
import os
import re
import signal
import sys
import textwrap
import token
import tokenize
import pycodestyle
try:
unicode
except NameError:
unicode = str
__version__ = '1.4'
CR = '\r'
LF = '\n'
CRLF = '\r\n'
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
LAMBDA_REGEX = re.compile(r'([\w.]+)\s=\slambda\s*([\(\)=\w,\s.]*):')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+([^][)(}{]+?)\s+(in|is)\s')
COMPARE_NEGATIVE_REGEX_THROUGH = re.compile(r'\b(not\s+in|is\s+not)\s')
BARE_EXCEPT_REGEX = re.compile(r'except\s*:')
STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\s.*\):')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
frozenset([',']),
frozenset(['%']),
frozenset([',', '(', '[', '{']),
frozenset(['%', '(', '[', '{']),
frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
frozenset(['%', '+', '-', '*', '/', '//']),
])
DEFAULT_IGNORE = 'E226,E24,W503,W690' # TODO: use pycodestyle.DEFAULT_IGNORE
DEFAULT_INDENT_SIZE = 4
SELECTED_GLOBAL_FIXED_METHOD_CODES = ['W602', ]
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
'E231': ['ws_comma'],
'E721': ['idioms'],
'W601': ['has_key'],
'W603': ['ne'],
'W604': ['repr'],
'W690': ['apply',
'except',
'exitfunc',
'numliterals',
'operator',
'paren',
'reduce',
'renames',
'standarderror',
'sys_exc',
'throw',
'tuple_params',
'xreadlines']}
if sys.platform == 'win32': # pragma: no cover
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
MAX_PYTHON_FILE_DETECTION_BYTES = 1024
def open_with_encoding(filename,
encoding=None, mode='r', limit_byte_check=-1):
"""Return opened file with a specific encoding."""
if not encoding:
encoding = detect_encoding(filename, limit_byte_check=limit_byte_check)
return io.open(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename, limit_byte_check=-1):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
with open_with_encoding(filename, encoding) as test_file:
test_file.read(limit_byte_check)
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1'
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
def extended_blank_lines(logical_line,
blank_lines,
blank_before,
indent_level,
previous_logical):
"""Check for missing blank lines after class declaration."""
if previous_logical.startswith('def '):
if blank_lines and pycodestyle.DOCSTRING_REGEX.match(logical_line):
yield (0, 'E303 too many blank lines ({})'.format(blank_lines))
elif pycodestyle.DOCSTRING_REGEX.match(previous_logical):
# Missing blank line between class docstring and method declaration.
if (
indent_level and
not blank_lines and
not blank_before and
logical_line.startswith(('def ')) and
'(self' in logical_line
):
yield (0, 'E301 expected 1 blank line, found 0')
pycodestyle.register_check(extended_blank_lines)
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
indent_char, noqa):
"""Override pycodestyle's function to provide indentation information."""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented. Assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line. In turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (
(DEFAULT_INDENT_SIZE,)
if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
2 * DEFAULT_INDENT_SIZE)
)
# Remember how many brackets were opened on each line.
parens = [0] * nrows
# Relative indents of physical lines.
rel_indent = [0] * nrows
# For each depth, collect a list of opening rows.
open_rows = [[0]]
# For each depth, memorize the hanging indentation.
hangs = [None]
# Visual indents.
indent_chances = {}
last_indent = tokens[0][2]
indent = [last_indent[1]]
last_token_multiline = None
line = None
last_line = ''
last_line_begins_with_multiline = False
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = (not last_token_multiline and
token_type not in (tokenize.NL, tokenize.NEWLINE))
last_line_begins_with_multiline = last_token_multiline
if newline:
# This is the beginning of a continuation line.
last_indent = start
# Record the initial indent.
rel_indent[row] = pycodestyle.expand_indent(line) - indent_level
# Identify closing bracket.
close_bracket = (token_type == tokenize.OP and text in ']})')
# Is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# Closing bracket for visual indent.
if start[1] != indent[depth]:
yield (start, 'E124 {}'.format(indent[depth]))
elif close_bracket and not hang:
# closing bracket matches indentation of opening bracket's line
if hang_closing:
yield (start, 'E133 {}'.format(indent[depth]))
elif indent[depth] and start[1] < indent[depth]:
# Visual indent is broken.
yield (start, 'E128 {}'.format(indent[depth]))
elif (hanging_indent or
(indent_next and
rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
# Hanging indent is verified.
if close_bracket and not hang_closing:
yield (start, 'E123 {}'.format(indent_level +
rel_indent[open_row]))
hangs[depth] = hang
elif visual_indent is True:
# Visual indent is verified.
indent[depth] = start[1]
elif visual_indent in (text, unicode):
# Ignore token lined up with matching one from a previous line.
pass
else:
one_indented = (indent_level + rel_indent[open_row] +
DEFAULT_INDENT_SIZE)
# Indent is broken.
if hang <= 0:
error = ('E122', one_indented)
elif indent[depth]:
error = ('E127', indent[depth])
elif not close_bracket and hangs[depth]:
error = ('E131', one_indented)
elif hang > DEFAULT_INDENT_SIZE:
error = ('E126', one_indented)
else:
hangs[depth] = hang
error = ('E121', one_indented)
yield (start, '{} {}'.format(*error))
# Look for visual indenting.
if (
parens[row] and
token_type not in (tokenize.NL, tokenize.COMMENT) and
not indent[depth]
):
indent[depth] = start[1]
indent_chances[start[1]] = True
# Deal with implicit string concatenation.
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = unicode
# Special case for the "if" statement because len("if (") is equal to
# 4.
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# Keep track of bracket depth.
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
elif text in ')]}' and depth > 0:
# Parent indents should not be more than this one.
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if (
start[1] not in indent_chances and
# This is for purposes of speeding up E121 (GitHub #90).
not last_line.rstrip().endswith(',')
):
# Allow to line up tokens.
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
last_line = line
if (
indent_next and
not last_line_begins_with_multiline and
pycodestyle.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
):
pos = (start[0], indent[0] + 4)
desired_indent = indent_level + 2 * DEFAULT_INDENT_SIZE
if visual_indent:
yield (pos, 'E129 {}'.format(desired_indent))
else:
yield (pos, 'E125 {}'.format(desired_indent))
del pycodestyle._checks['logical_line'][pycodestyle.continued_indentation]
pycodestyle.register_check(continued_indentation)
class FixPEP8(object):
"""Fix invalid code.
Fixer methods are prefixed "fix_". The _fix_source() method looks for these
automatically.
The fixer method can take either one or two arguments (in addition to
self). The first argument is "result", which is the error information from
pycodestyle. The second argument, "logical", is required only for
logical-line fixes.
The fixer method can return the list of modified lines or None. An empty
list would mean that no changes were made. None would mean that only the
line reported in the pycodestyle error was modified. Note that the modified
line numbers that are returned are indexed at 1. This typically would
correspond with the line number reported in the pycodestyle error
information.
[fixed method list]
- e111,e114,e115,e116
- e121,e122,e123,e124,e125,e126,e127,e128,e129
- e201,e202,e203
- e211
- e221,e222,e223,e224,e225
- e231
- e251,e252
- e261,e262
- e271,e272,e273,e274
- e301,e302,e303,e304,e305,e306
- e401
- e502
- e701,e702,e703,e704
- e711,e712,e713,e714
- e722
- e731
- w291
- w503
"""
def __init__(self, filename,
options,
contents=None,
long_line_ignore_cache=None):
self.filename = filename
if contents is None:
self.source = readlines_from_file(filename)
else:
sio = io.StringIO(contents)
self.source = sio.readlines()
self.options = options
self.indent_word = _get_indentword(''.join(self.source))
self.long_line_ignore_cache = (
set() if long_line_ignore_cache is None
else long_line_ignore_cache)
# Many fixers are the same even though pycodestyle categorizes them
# differently.
self.fix_e115 = self.fix_e112
self.fix_e121 = self._fix_reindent
self.fix_e122 = self._fix_reindent
self.fix_e123 = self._fix_reindent
self.fix_e124 = self._fix_reindent
self.fix_e126 = self._fix_reindent
self.fix_e127 = self._fix_reindent
self.fix_e128 = self._fix_reindent
self.fix_e129 = self._fix_reindent
self.fix_e133 = self.fix_e131
self.fix_e202 = self.fix_e201
self.fix_e203 = self.fix_e201
self.fix_e211 = self.fix_e201
self.fix_e221 = self.fix_e271
self.fix_e222 = self.fix_e271
self.fix_e223 = self.fix_e271
self.fix_e226 = self.fix_e225
self.fix_e227 = self.fix_e225
self.fix_e228 = self.fix_e225
self.fix_e241 = self.fix_e271
self.fix_e242 = self.fix_e224
self.fix_e252 = self.fix_e225
self.fix_e261 = self.fix_e262
self.fix_e272 = self.fix_e271
self.fix_e273 = self.fix_e271
self.fix_e274 = self.fix_e271
self.fix_e306 = self.fix_e301
self.fix_e501 = (
self.fix_long_line_logically if
options and (options.aggressive >= 2 or options.experimental) else
self.fix_long_line_physically)
self.fix_e703 = self.fix_e702
self.fix_w293 = self.fix_w291
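    # A minimal sketch of the fixer-method shape described in the class
    # docstring above, assuming a hypothetical error code "X000" (illustrative
    # only, not a real pycodestyle code handled by this class):
    #
    #     def fix_x000(self, result):
    #         line_index = result['line'] - 1
    #         self.source[line_index] = self.source[line_index].rstrip() + '\n'
    #         # Returning None (implicitly) means only the reported line changed.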
def _fix_source(self, results):
try:
(logical_start, logical_end) = _find_logical(self.source)
logical_support = True
except (SyntaxError, tokenize.TokenError): # pragma: no cover
logical_support = False
completed_lines = set()
for result in sorted(results, key=_priority_key):
if result['line'] in completed_lines:
continue
fixed_methodname = 'fix_' + result['id'].lower()
if hasattr(self, fixed_methodname):
fix = getattr(self, fixed_methodname)
line_index = result['line'] - 1
original_line = self.source[line_index]
is_logical_fix = len(_get_parameters(fix)) > 2
if is_logical_fix:
logical = None
if logical_support:
logical = _get_logical(self.source,
result,
logical_start,
logical_end)
if logical and set(range(
logical[0][0] + 1,
logical[1][0] + 1)).intersection(
completed_lines):
continue
modified_lines = fix(result, logical)
else:
modified_lines = fix(result)
if modified_lines is None:
# Force logical fixes to report what they modified.
assert not is_logical_fix
if self.source[line_index] == original_line:
modified_lines = []
if modified_lines:
completed_lines.update(modified_lines)
elif modified_lines == []: # Empty list means no fix
if self.options.verbose >= 2:
print(
'---> Not fixing {error} on line {line}'.format(
error=result['id'], line=result['line']),
file=sys.stderr)
else: # We assume one-line fix when None.
completed_lines.add(result['line'])
else:
if self.options.verbose >= 3:
print(
"---> '{}' is not defined.".format(fixed_methodname),
file=sys.stderr)
info = result['info'].strip()
print('---> {}:{}:{}:{}'.format(self.filename,
result['line'],
result['column'],
info),
file=sys.stderr)
def fix(self):
"""Return a version of the source code with PEP 8 violations fixed."""
pep8_options = {
'ignore': self.options.ignore,
'select': self.options.select,
'max_line_length': self.options.max_line_length,
'hang_closing': self.options.hang_closing,
}
results = _execute_pep8(pep8_options, self.source)
if self.options.verbose:
progress = {}
for r in results:
if r['id'] not in progress:
progress[r['id']] = set()
progress[r['id']].add(r['line'])
print('---> {n} issue(s) to fix {progress}'.format(
n=len(results), progress=progress), file=sys.stderr)
if self.options.line_range:
start, end = self.options.line_range
results = [r for r in results
if start <= r['line'] <= end]
self._fix_source(filter_results(source=''.join(self.source),
results=results,
aggressive=self.options.aggressive))
if self.options.line_range:
# If number of lines has changed then change line_range.
count = sum(sline.count('\n')
for sline in self.source[start - 1:end])
self.options.line_range[1] = start + count - 1
return ''.join(self.source)
def _fix_reindent(self, result):
"""Fix a badly indented line.
This is done by adding or removing from its initial indent only.
"""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target
def fix_e113(self, result):
"""Fix unexpected indentation."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
self.source[line_index] = indent[1:] + stripped
def fix_e116(self, result):
"""Fix over-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
if not stripped.startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = indent[1:] + stripped
    def fix_e125(self, result):
        """Fix indentation indistinguishable from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines
    def fix_e131(self, result):
        """Fix indentation indistinguishable from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
if spaces_to_add >= 0:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
else:
offset = abs(spaces_to_add)
self.source[line_index] = self.source[line_index][offset:]
def fix_e201(self, result):
"""Remove extraneous whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
fixed = fix_whitespace(target,
offset=offset,
replacement='')
self.source[line_index] = fixed
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed
def fix_e225(self, result):
"""Fix missing whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + ' ' + target[offset:]
# Only proceed if non-whitespace characters match.
# And make sure we don't break the indentation.
if (
fixed.replace(' ', '') == target.replace(' ', '') and
_get_indentation(fixed) == _get_indentation(target)
):
self.source[result['line'] - 1] = fixed
error_code = result.get('id', 0)
try:
ts = generate_tokens(fixed)
except (SyntaxError, tokenize.TokenError):
return
if not check_syntax(fixed.lstrip()):
return
errors = list(
pycodestyle.missing_whitespace_around_operator(fixed, ts))
for e in reversed(errors):
if error_code != e[1].split()[0]:
continue
offset = e[0][1]
fixed = fixed[:offset] + ' ' + fixed[offset:]
self.source[result['line'] - 1] = fixed
else:
return []
def fix_e231(self, result):
"""Add missing whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column']
fixed = target[:offset].rstrip() + ' ' + target[offset:].lstrip()
self.source[line_index] = fixed
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
# This is necessary since pycodestyle sometimes reports columns that
        # go past the end of the physical line. This happens in cases like,
# foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
def fix_e262(self, result):
"""Fix spacing after comment hash."""
target = self.source[result['line'] - 1]
offset = result['column']
code = target[:offset].rstrip(' \t#')
comment = target[offset:].lstrip(' \t#')
fixed = code + (' # ' + comment if comment.strip() else '\n')
self.source[result['line'] - 1] = fixed
def fix_e271(self, result):
"""Fix extraneous whitespace around keywords."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
fixed = fix_whitespace(target,
offset=offset,
replacement=' ')
if fixed == target:
return []
else:
self.source[line_index] = fixed
def fix_e301(self, result):
"""Add missing blank line."""
cr = '\n'
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e302(self, result):
"""Add missing 2 blank lines."""
add_linenum = 2 - int(result['info'].split()[-1])
cr = '\n' * add_linenum
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pycodestyle reports an offset line number if
# there are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
def fix_e304(self, result):
"""Remove blank line following function decorator."""
line = result['line'] - 2
if not self.source[line].strip():
self.source[line] = ''
def fix_e305(self, result):
"""Add missing 2 blank lines after end of function or class."""
add_delete_linenum = 2 - int(result['info'].split()[-1])
cnt = 0
offset = result['line'] - 2
modified_lines = []
if add_delete_linenum < 0:
# delete cr
add_delete_linenum = abs(add_delete_linenum)
while cnt < add_delete_linenum and offset >= 0:
if not self.source[offset].strip():
self.source[offset] = ''
modified_lines.append(1 + offset) # Line indexed at 1
cnt += 1
offset -= 1
else:
# add cr
cr = '\n'
# check comment line
while True:
if offset < 0:
break
line = self.source[offset].lstrip()
if not line:
break
if line[0] != '#':
break
offset -= 1
offset += 1
self.source[offset] = cr + self.source[offset]
modified_lines.append(1 + offset) # Line indexed at 1.
return modified_lines
def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed
def fix_long_line_logically(self, result, logical):
"""Try to make lines fit within --max-line-length characters."""
if (
not logical or
len(logical[2]) == 1 or
self.source[result['line'] - 1].lstrip().startswith('#')
):
return self.fix_long_line_physically(result)
start_line_index = logical[0][0]
end_line_index = logical[1][0]
logical_lines = logical[2]
previous_line = get_item(self.source, start_line_index - 1, default='')
next_line = get_item(self.source, end_line_index + 1, default='')
single_line = join_logical_line(''.join(logical_lines))
try:
fixed = self.fix_long_line(
target=single_line,
previous_line=previous_line,
next_line=next_line,
original=''.join(logical_lines))
except (SyntaxError, tokenize.TokenError):
return self.fix_long_line_physically(result)
if fixed:
for line_index in range(start_line_index, end_line_index + 1):
self.source[line_index] = ''
self.source[start_line_index] = fixed
return range(start_line_index + 1, end_line_index + 1)
return []
def fix_long_line_physically(self, result):
"""Try to make lines fit within --max-line-length characters."""
line_index = result['line'] - 1
target = self.source[line_index]
previous_line = get_item(self.source, line_index - 1, default='')
next_line = get_item(self.source, line_index + 1, default='')
try:
fixed = self.fix_long_line(
target=target,
previous_line=previous_line,
next_line=next_line,
original=target)
except (SyntaxError, tokenize.TokenError):
return []
if fixed:
self.source[line_index] = fixed
return [line_index + 1]
return []
def fix_long_line(self, target, previous_line,
next_line, original):
cache_entry = (target, previous_line, next_line)
if cache_entry in self.long_line_ignore_cache:
return []
if target.lstrip().startswith('#'):
if self.options.aggressive:
# Wrap commented lines.
return shorten_comment(
line=target,
max_line_length=self.options.max_line_length,
last_comment=not next_line.lstrip().startswith('#'))
return []
fixed = get_fixed_long_line(
target=target,
previous_line=previous_line,
original=original,
indent_word=self.indent_word,
max_line_length=self.options.max_line_length,
aggressive=self.options.aggressive,
experimental=self.options.experimental,
verbose=self.options.verbose)
if fixed and not code_almost_equal(original, fixed):
return fixed
self.long_line_ignore_cache.add(cache_entry)
return None
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
def fix_e701(self, result):
"""Put colon-separated compound statement on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
c = result['column']
fixed_source = (target[:c] + '\n' +
_get_indentation(target) + self.indent_word +
target[c:].lstrip('\n\r \t\\'))
self.source[result['line'] - 1] = fixed_source
return [result['line'], result['line'] + 1]
def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
# Avoid applying this when indented.
# https://docs.python.org/reference/compound_stmts.html
for line in logical_lines:
if (result['id'] == 'E702' and ':' in line
and STARTSWITH_DEF_REGEX.match(line)):
return []
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
# Find inline comment.
inline_comment = None
if target[offset:].lstrip(';').lstrip()[:2] == '# ':
inline_comment = target[offset:].lstrip(';')
if inline_comment:
self.source[line_index] = first + inline_comment
else:
self.source[line_index] = first + '\n' + second
return [line_index + 1]
def fix_e704(self, result):
"""Fix multiple statements on one line def"""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
match = STARTSWITH_DEF_REGEX.match(target)
if match:
self.source[line_index] = '{}\n{}{}'.format(
match.group(0),
_get_indentation(target) + self.indent_word,
target[match.end(0):].lstrip())
def fix_e711(self, result):
"""Fix comparison with None."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
if not right.startswith('None'):
return []
if center.strip() == '==':
new_center = 'is'
elif center.strip() == '!=':
new_center = 'is not'
else:
return []
self.source[line_index] = ' '.join([left, new_center, right])
def fix_e712(self, result):
"""Fix (trivial case of) comparison with boolean."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# Handle very easy "not" special cases.
if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right
def fix_e713(self, result):
"""Fix (trivial case of) non-membership check."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# to convert once 'not in' -> 'in'
before_target = target[:offset]
target = target[offset:]
match_notin = COMPARE_NEGATIVE_REGEX_THROUGH.search(target)
notin_pos_start, notin_pos_end = 0, 0
if match_notin:
notin_pos_start = match_notin.start(1)
notin_pos_end = match_notin.end()
target = '{}{} {}'.format(
target[:notin_pos_start], 'in', target[notin_pos_end:])
# fix 'not in'
match = COMPARE_NEGATIVE_REGEX.search(target)
if match:
if match.group(3) == 'in':
pos_start = match.start(1)
new_target = '{5}{0}{1} {2} {3} {4}'.format(
target[:pos_start], match.group(2), match.group(1),
match.group(3), target[match.end():], before_target)
if match_notin:
# revert 'in' -> 'not in'
pos_start = notin_pos_start + offset
pos_end = notin_pos_end + offset - 4 # len('not ')
new_target = '{}{} {}'.format(
new_target[:pos_start], 'not in', new_target[pos_end:])
self.source[line_index] = new_target
def fix_e714(self, result):
"""Fix object identity should be 'is not' case."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# to convert once 'is not' -> 'is'
before_target = target[:offset]
target = target[offset:]
match_isnot = COMPARE_NEGATIVE_REGEX_THROUGH.search(target)
isnot_pos_start, isnot_pos_end = 0, 0
if match_isnot:
isnot_pos_start = match_isnot.start(1)
isnot_pos_end = match_isnot.end()
target = '{}{} {}'.format(
target[:isnot_pos_start], 'in', target[isnot_pos_end:])
match = COMPARE_NEGATIVE_REGEX.search(target)
if match:
if match.group(3).startswith('is'):
pos_start = match.start(1)
new_target = '{5}{0}{1} {2} {3} {4}'.format(
target[:pos_start], match.group(2), match.group(3),
match.group(1), target[match.end():], before_target)
if match_isnot:
# revert 'is' -> 'is not'
pos_start = isnot_pos_start + offset
pos_end = isnot_pos_end + offset - 4 # len('not ')
new_target = '{}{} {}'.format(
new_target[:pos_start], 'is not', new_target[pos_end:])
self.source[line_index] = new_target
def fix_e722(self, result):
"""fix bare except"""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
match = BARE_EXCEPT_REGEX.search(target)
if match:
self.source[line_index] = '{}{}{}'.format(
target[:result['column'] - 1], "except BaseException:",
target[match.end():])
def fix_e731(self, result):
"""Fix do not assign a lambda expression check."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
match = LAMBDA_REGEX.search(target)
if match:
end = match.end()
self.source[line_index] = '{}def {}({}): return {}'.format(
target[:match.start(0)], match.group(1), match.group(2),
target[end:].lstrip())
def fix_w291(self, result):
"""Remove trailing whitespace."""
fixed_line = self.source[result['line'] - 1].rstrip()
self.source[result['line'] - 1] = fixed_line + '\n'
def fix_w391(self, _):
"""Remove trailing blank lines."""
blank_count = 0
for line in reversed(self.source):
line = line.rstrip()
if line:
break
else:
blank_count += 1
original_length = len(self.source)
self.source = self.source[:original_length - blank_count]
return range(1, 1 + original_length)
def fix_w503(self, result):
(line_index, _, target) = get_index_offset_contents(result,
self.source)
one_string_token = target.split()[0]
try:
ts = generate_tokens(one_string_token)
except (SyntaxError, tokenize.TokenError):
return
if not _is_binary_operator(ts[0][0], one_string_token):
return
# find comment
comment_index = 0
for i in range(5):
            # NOTE: try to parse the code up to 5 times
if (line_index - i) < 0:
break
from_index = line_index - i - 1
to_index = line_index + 1
try:
ts = generate_tokens("".join(self.source[from_index:to_index]))
except (SyntaxError, tokenize.TokenError):
continue
newline_count = 0
newline_index = []
for index, t in enumerate(ts):
if t[0] in (tokenize.NEWLINE, tokenize.NL):
newline_index.append(index)
newline_count += 1
if newline_count > 2:
tts = ts[newline_index[-3]:]
else:
tts = ts
old = []
for t in tts:
if tokenize.COMMENT == t[0] and old:
comment_index = old[3][1]
break
old = t
break
i = target.index(one_string_token)
self.source[line_index] = '{}{}'.format(
target[:i], target[i + len(one_string_token):].lstrip())
nl = find_newline(self.source[line_index - 1:line_index])
before_line = self.source[line_index - 1]
bl = before_line.index(nl)
if comment_index:
self.source[line_index - 1] = '{} {} {}'.format(
before_line[:comment_index], one_string_token,
before_line[comment_index + 1:])
else:
self.source[line_index - 1] = '{} {}{}'.format(
before_line[:bl], one_string_token, before_line[bl:])
def fix_w605(self, result):
(line_index, _, target) = get_index_offset_contents(result,
self.source)
tokens = list(generate_tokens(target))
for (pos, _msg) in get_w605_position(tokens):
self.source[line_index] = '{}r{}'.format(
target[:pos], target[pos:])
def get_w605_position(tokens):
    """Work around pycodestyle to get the position pointed out by W605."""
# TODO: When this PR(*) change is released, use pos of pycodestyle
# *: https://github.com/PyCQA/pycodestyle/pull/747
valid = [
'\n', '\\', '\'', '"', 'a', 'b', 'f', 'n', 'r', 't', 'v',
'0', '1', '2', '3', '4', '5', '6', '7', 'x',
# Escape sequences only recognized in string literals
'N', 'u', 'U',
]
for token_type, text, start, end, line in tokens:
if token_type == tokenize.STRING:
quote = text[-3:] if text[-3:] in ('"""', "'''") else text[-1]
# Extract string modifiers (e.g. u or r)
quote_pos = text.index(quote)
prefix = text[:quote_pos].lower()
start = quote_pos + len(quote)
string = text[start:-len(quote)]
if 'r' not in prefix:
pos = string.find('\\')
while pos >= 0:
pos += 1
if string[pos] not in valid:
yield (
line.find(text),
"W605 invalid escape sequence '\\%s'" %
string[pos],
)
pos = string.find('\\', pos + 1)
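# Illustrative behaviour of the generator above: a literal such as '\d' written
# without a raw prefix is reported as an invalid escape sequence, while r'\d'
# is skipped because its prefix contains 'r'.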
def get_index_offset_contents(result, source):
"""Return (line_index, column_offset, line_contents)."""
line_index = result['line'] - 1
return (line_index,
result['column'] - 1,
source[line_index])
def get_fixed_long_line(target, previous_line, original,
indent_word=' ', max_line_length=79,
aggressive=False, experimental=False, verbose=False):
"""Break up long line and return result.
Do this by generating multiple reformatted candidates and then
ranking the candidates to heuristically select the best option.
"""
indent = _get_indentation(target)
source = target[len(indent):]
assert source.lstrip() == source
assert not target.lstrip().startswith('#')
# Check for partial multiline.
tokens = list(generate_tokens(source))
candidates = shorten_line(
tokens, source, indent,
indent_word,
max_line_length,
aggressive=aggressive,
experimental=experimental,
previous_line=previous_line)
# Also sort alphabetically as a tie breaker (for determinism).
candidates = sorted(
sorted(set(candidates).union([target, original])),
key=lambda x: line_shortening_rank(
x,
indent_word,
max_line_length,
experimental=experimental))
if verbose >= 4:
print(('-' * 79 + '\n').join([''] + candidates + ['']),
file=wrap_output(sys.stderr, 'utf-8'))
if candidates:
best_candidate = candidates[0]
# Don't allow things to get longer.
if longest_line_length(best_candidate) > longest_line_length(original):
return None
return best_candidate
def longest_line_length(code):
"""Return length of longest line."""
return max(len(line) for line in code.splitlines())
def join_logical_line(logical_line):
"""Return single line based on logical line input."""
indentation = _get_indentation(logical_line)
return indentation + untokenize_without_newlines(
generate_tokens(logical_line.lstrip())) + '\n'
def untokenize_without_newlines(tokens):
"""Return source code based on tokens."""
text = ''
last_row = 0
last_column = -1
for t in tokens:
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
if start_row > last_row:
last_column = 0
if (
(start_column > last_column or token_string == '\n') and
not text.endswith(' ')
):
text += ' '
if token_string != '\n':
text += token_string
last_row = end_row
last_column = end_column
return text.rstrip()
def _find_logical(source_lines):
# Make a variable which is the index of all the starts of lines.
logical_start = []
logical_end = []
last_newline = True
parens = 0
for t in generate_tokens(''.join(source_lines)):
if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
tokenize.INDENT, tokenize.NL,
tokenize.ENDMARKER]:
continue
if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
last_newline = True
logical_end.append((t[3][0] - 1, t[2][1]))
continue
if last_newline and not parens:
logical_start.append((t[2][0] - 1, t[2][1]))
last_newline = False
if t[0] == tokenize.OP:
if t[1] in '([{':
parens += 1
elif t[1] in '}])':
parens -= 1
return (logical_start, logical_end)
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
def get_item(items, index, default=None):
if 0 <= index < len(items):
return items[index]
return default
def reindent(source, indent_size):
"""Reindent all lines."""
reindenter = Reindenter(source)
return reindenter.run(indent_size)
def code_almost_equal(a, b):
"""Return True if code is similar.
Ignore whitespace when comparing specific line.
"""
split_a = split_and_strip_non_empty_lines(a)
split_b = split_and_strip_non_empty_lines(b)
if len(split_a) != len(split_b):
return False
for (index, _) in enumerate(split_a):
if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
return False
return True
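# For example, code_almost_equal('x=1\ny=2\n', 'x = 1\ny = 2\n') is True, while
# snippets with a different number of non-empty lines always compare False.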
def split_and_strip_non_empty_lines(text):
"""Return lines split by newline.
Ignore empty lines.
"""
return [line.strip() for line in text.splitlines() if line.strip()]
def fix_e265(source, aggressive=False): # pylint: disable=unused-argument
"""Format block comments."""
if '#' not in source:
# Optimization.
return source
ignored_line_numbers = multiline_string_lines(
source,
include_docstrings=True) | set(commented_out_code_lines(source))
fixed_lines = []
sio = io.StringIO(source)
for (line_number, line) in enumerate(sio.readlines(), start=1):
if (
line.lstrip().startswith('#') and
line_number not in ignored_line_numbers and
not pycodestyle.noqa(line)
):
indentation = _get_indentation(line)
line = line.lstrip()
# Normalize beginning if not a shebang.
if len(line) > 1:
pos = next((index for index, c in enumerate(line)
if c != '#'))
if (
# Leave multiple spaces like '# ' alone.
(line[:pos].count('#') > 1 or line[1].isalnum() or
not line[1].isspace()) and
line[1] not in ':!' and
# Leave stylistic outlined blocks alone.
not line.rstrip().endswith('#')
):
line = '# ' + line.lstrip('# \t')
fixed_lines.append(indentation + line)
else:
fixed_lines.append(line)
return ''.join(fixed_lines)
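# For example, fix_e265('#comment\n') returns '# comment\n', while shebang-style
# lines such as '#!/usr/bin/env python' are left untouched.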
def refactor(source, fixer_names, ignore=None, filename=''):
"""Return refactored code using lib2to3.
Skip if ignore string is produced in the refactored code.
"""
from lib2to3 import pgen2
try:
new_text = refactor_with_2to3(source,
fixer_names=fixer_names,
filename=filename)
except (pgen2.parse.ParseError,
SyntaxError,
UnicodeDecodeError,
UnicodeEncodeError):
return source
if ignore:
if ignore in new_text and ignore not in source:
return source
return new_text
def code_to_2to3(select, ignore, where='', verbose=False):
fixes = set()
for code, fix in CODE_TO_2TO3.items():
if code_match(code, select=select, ignore=ignore):
if verbose:
print('---> Applying {} fix for {}'.format(where,
code.upper()),
file=sys.stderr)
fixes |= set(fix)
return fixes
def fix_2to3(source,
aggressive=True, select=None, ignore=None, filename='',
where='global', verbose=False):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source
select = select or []
ignore = ignore or []
return refactor(source,
code_to_2to3(select=select,
ignore=ignore,
where=where,
verbose=verbose),
filename=filename)
def fix_w602(source, aggressive=True):
"""Fix deprecated form of raising exception."""
if not aggressive:
return source
return refactor(source, ['raise'], ignore='with_traceback')
def find_newline(source):
"""Return type of newline used in source.
Input is a list of lines.
"""
assert not isinstance(source, unicode)
counter = collections.defaultdict(int)
for line in source:
if line.endswith(CRLF):
counter[CRLF] += 1
elif line.endswith(CR):
counter[CR] += 1
elif line.endswith(LF):
counter[LF] += 1
return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
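# Illustrative behaviour of find_newline(): the most common line ending wins,
# e.g. find_newline(['a\r\n', 'b\r\n', 'c\n']) -> '\r\n' (CRLF).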
def _get_indentword(source):
"""Return indentation type."""
indent_word = ' ' # Default in case source has no indentation
try:
for t in generate_tokens(source):
if t[0] == token.INDENT:
indent_word = t[1]
break
except (SyntaxError, tokenize.TokenError):
pass
return indent_word
def _get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
return ''
def get_diff_text(old, new, filename):
"""Return text of unified diff between old and new."""
newline = '\n'
diff = difflib.unified_diff(
old, new,
'original/' + filename,
'fixed/' + filename,
lineterm=newline)
text = ''
for line in diff:
text += line
# Work around missing newline (http://bugs.python.org/issue2142).
if text and not line.endswith(newline):
text += newline + r'\ No newline at end of file' + newline
return text
def _priority_key(pep8_result):
"""Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation.
"""
priority = [
# Fix multiline colon-based before semicolon based.
'e701',
# Break multiline statements early.
'e702',
# Things that make lines longer.
'e225', 'e231',
# Remove extraneous whitespace before breaking lines.
'e201',
# Shorten whitespace in comment before resorting to wrapping.
'e262'
]
middle_index = 10000
lowest_priority = [
# We need to shorten lines last since the logical fixer can get in a
# loop, which causes us to exit early.
'e501',
]
key = pep8_result['id'].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index
def shorten_line(tokens, source, indentation, indent_word, max_line_length,
aggressive=False, experimental=False, previous_line=''):
"""Separate line at OPERATOR.
Multiple candidates will be yielded.
"""
for candidate in _shorten_line(tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
aggressive=aggressive,
previous_line=previous_line):
yield candidate
if aggressive:
for key_token_strings in SHORTEN_OPERATOR_GROUPS:
shortened = _shorten_line_at_tokens(
tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
key_token_strings=key_token_strings,
aggressive=aggressive)
if shortened is not None and shortened != source:
yield shortened
if experimental:
for shortened in _shorten_line_at_tokens_new(
tokens=tokens,
source=source,
indentation=indentation,
max_line_length=max_line_length):
yield shortened
def _shorten_line(tokens, source, indentation, indent_word,
aggressive=False, previous_line=''):
"""Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
"""
for (token_type,
token_string,
start_offset,
end_offset) in token_offsets(tokens):
if (
token_type == tokenize.COMMENT and
not is_probably_part_of_multiline(previous_line) and
not is_probably_part_of_multiline(source) and
not source[start_offset + 1:].strip().lower().startswith(
('noqa', 'pragma:', 'pylint:'))
):
# Move inline comments to previous line.
first = source[:start_offset]
second = source[start_offset:]
yield (indentation + second.strip() + '\n' +
indentation + first.strip() + '\n')
elif token_type == token.OP and token_string != '=':
# Don't break on '=' after keyword as this violates PEP 8.
assert token_type != token.INDENT
first = source[:end_offset]
second_indent = indentation
if (first.rstrip().endswith('(') and
source[end_offset:].lstrip().startswith(')')):
pass
elif first.rstrip().endswith('('):
second_indent += indent_word
elif '(' in first:
second_indent += ' ' * (1 + first.find('('))
else:
second_indent += indent_word
second = (second_indent + source[end_offset:].lstrip())
if (
not second.strip() or
second.lstrip().startswith('#')
):
continue
# Do not begin a line with a comma
if second.lstrip().startswith(','):
continue
            # Do not end a line with a dot.
if first.rstrip().endswith('.'):
continue
if token_string in '+-*/':
fixed = first + ' \\' + '\n' + second
else:
fixed = first + '\n' + second
# Only fix if syntax is okay.
if check_syntax(normalize_multiline(fixed)
if aggressive else fixed):
yield indentation + fixed
def _is_binary_operator(token_type, text):
return ((token_type == tokenize.OP or text in ['and', 'or']) and
text not in '()[]{},:.;@=%~')
# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
'spos', 'epos', 'line'])
class ReformattedLines(object):
"""The reflowed lines of atoms.
Each part of the line is represented as an "atom." They can be moved
around when need be to get the optimal formatting.
"""
###########################################################################
# Private Classes
class _Indent(object):
"""Represent an indentation in the atom stream."""
def __init__(self, indent_amt):
self._indent_amt = indent_amt
def emit(self):
return ' ' * self._indent_amt
@property
def size(self):
return self._indent_amt
class _Space(object):
"""Represent a space in the atom stream."""
def emit(self):
return ' '
@property
def size(self):
return 1
class _LineBreak(object):
"""Represent a line break in the atom stream."""
def emit(self):
return '\n'
@property
def size(self):
return 0
def __init__(self, max_line_length):
self._max_line_length = max_line_length
self._lines = []
self._bracket_depth = 0
self._prev_item = None
self._prev_prev_item = None
def __repr__(self):
return self.emit()
###########################################################################
# Public Methods
def add(self, obj, indent_amt, break_after_open_bracket):
if isinstance(obj, Atom):
self._add_item(obj, indent_amt)
return
self._add_container(obj, indent_amt, break_after_open_bracket)
def add_comment(self, item):
num_spaces = 2
if len(self._lines) > 1:
if isinstance(self._lines[-1], self._Space):
num_spaces -= 1
if len(self._lines) > 2:
if isinstance(self._lines[-2], self._Space):
num_spaces -= 1
while num_spaces > 0:
self._lines.append(self._Space())
num_spaces -= 1
self._lines.append(item)
def add_indent(self, indent_amt):
self._lines.append(self._Indent(indent_amt))
def add_line_break(self, indent):
self._lines.append(self._LineBreak())
self.add_indent(len(indent))
def add_line_break_at(self, index, indent_amt):
self._lines.insert(index, self._LineBreak())
self._lines.insert(index + 1, self._Indent(indent_amt))
def add_space_if_needed(self, curr_text, equal=False):
if (
not self._lines or isinstance(
self._lines[-1], (self._LineBreak, self._Indent, self._Space))
):
return
prev_text = unicode(self._prev_item)
prev_prev_text = (
unicode(self._prev_prev_item) if self._prev_prev_item else '')
if (
# The previous item was a keyword or identifier and the current
# item isn't an operator that doesn't require a space.
((self._prev_item.is_keyword or self._prev_item.is_string or
self._prev_item.is_name or self._prev_item.is_number) and
(curr_text[0] not in '([{.,:}])' or
(curr_text[0] == '=' and equal))) or
# Don't place spaces around a '.', unless it's in an 'import'
# statement.
((prev_prev_text != 'from' and prev_text[-1] != '.' and
curr_text != 'import') and
# Don't place a space before a colon.
curr_text[0] != ':' and
# Don't split up ending brackets by spaces.
((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or
# Put a space after a colon or comma.
prev_text[-1] in ':,' or
# Put space around '=' if asked to.
(equal and prev_text == '=') or
# Put spaces around non-unary arithmetic operators.
((self._prev_prev_item and
(prev_text not in '+-' and
(self._prev_prev_item.is_name or
self._prev_prev_item.is_number or
self._prev_prev_item.is_string)) and
prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in')))))
):
self._lines.append(self._Space())
def previous_item(self):
"""Return the previous non-whitespace item."""
return self._prev_item
def fits_on_current_line(self, item_extent):
return self.current_size() + item_extent <= self._max_line_length
def current_size(self):
"""The size of the current line minus the indentation."""
size = 0
for item in reversed(self._lines):
size += item.size
if isinstance(item, self._LineBreak):
break
return size
def line_empty(self):
return (self._lines and
isinstance(self._lines[-1],
(self._LineBreak, self._Indent)))
def emit(self):
string = ''
for item in self._lines:
if isinstance(item, self._LineBreak):
string = string.rstrip()
string += item.emit()
return string.rstrip() + '\n'
###########################################################################
# Private Methods
def _add_item(self, item, indent_amt):
"""Add an item to the line.
Reflow the line to get the best formatting after the item is
inserted. The bracket depth indicates if the item is being
inserted inside of a container or not.
"""
if self._prev_item and self._prev_item.is_string and item.is_string:
# Place consecutive string literals on separate lines.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
item_text = unicode(item)
if self._lines and self._bracket_depth:
# Adding the item into a container.
self._prevent_default_initializer_splitting(item, indent_amt)
if item_text in '.,)]}':
self._split_after_delimiter(item, indent_amt)
elif self._lines and not self.line_empty():
# Adding the item outside of a container.
if self.fits_on_current_line(len(item_text)):
self._enforce_space(item)
else:
# Line break for the new item.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
self._lines.append(item)
self._prev_item, self._prev_prev_item = item, self._prev_item
if item_text in '([{':
self._bracket_depth += 1
elif item_text in '}])':
self._bracket_depth -= 1
assert self._bracket_depth >= 0
def _add_container(self, container, indent_amt, break_after_open_bracket):
actual_indent = indent_amt + 1
if (
unicode(self._prev_item) != '=' and
not self.line_empty() and
not self.fits_on_current_line(
container.size + self._bracket_depth + 2)
):
if unicode(container)[0] == '(' and self._prev_item.is_name:
# Don't split before the opening bracket of a call.
break_after_open_bracket = True
actual_indent = indent_amt + 4
elif (
break_after_open_bracket or
unicode(self._prev_item) not in '([{'
):
# If the container doesn't fit on the current line and the
# current line isn't empty, place the container on the next
# line.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
break_after_open_bracket = False
else:
actual_indent = self.current_size() + 1
break_after_open_bracket = False
if isinstance(container, (ListComprehension, IfExpression)):
actual_indent = indent_amt
# Increase the continued indentation only if recursing on a
# container.
container.reflow(self, ' ' * actual_indent,
break_after_open_bracket=break_after_open_bracket)
def _prevent_default_initializer_splitting(self, item, indent_amt):
"""Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed.
"""
if unicode(item) == '=':
# This is the assignment in the initializer. Just remove spaces for
# now.
self._delete_whitespace()
return
if (not self._prev_item or not self._prev_prev_item or
unicode(self._prev_item) != '='):
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt)
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for current_item in reversed(self._lines):
if (
last_space and
(not isinstance(current_item, Atom) or
not current_item.is_colon)
):
break
else:
last_space = None
if isinstance(current_item, self._Space):
last_space = current_item
if isinstance(current_item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
def _enforce_space(self, item):
"""Enforce a space in certain situations.
There are cases where we will want a space where normally we
wouldn't put one. This just enforces the addition of a space.
"""
if isinstance(self._lines[-1],
(self._Space, self._LineBreak, self._Indent)):
return
if not self._prev_item:
return
item_text = unicode(item)
prev_text = unicode(self._prev_item)
# Prefer a space around a '.' in an import statement, and between the
# 'import' and '('.
if (
(item_text == '.' and prev_text == 'from') or
(item_text == 'import' and prev_text == '.') or
(item_text == '(' and prev_text == 'import')
):
self._lines.append(self._Space())
def _delete_whitespace(self):
"""Delete all whitespace from the end of the line."""
while isinstance(self._lines[-1], (self._Space, self._LineBreak,
self._Indent)):
del self._lines[-1]
class Atom(object):
"""The smallest unbreakable unit that can be reflowed."""
def __init__(self, atom):
self._atom = atom
def __repr__(self):
return self._atom.token_string
def __len__(self):
return self.size
def reflow(
self, reflowed_lines, continued_indent, extent,
break_after_open_bracket=False,
is_list_comp_or_if_expr=False,
next_is_dot=False
):
if self._atom.token_type == tokenize.COMMENT:
reflowed_lines.add_comment(self)
return
total_size = extent if extent else self.size
if self._atom.token_string not in ',:([{}])':
# Some atoms will need an extra 1-sized space token after them.
total_size += 1
prev_item = reflowed_lines.previous_item()
if (
not is_list_comp_or_if_expr and
not reflowed_lines.fits_on_current_line(total_size) and
not (next_is_dot and
reflowed_lines.fits_on_current_line(self.size + 1)) and
not reflowed_lines.line_empty() and
not self.is_colon and
not (prev_item and prev_item.is_name and
unicode(self) == '(')
):
# Start a new line if there is already something on the line and
# adding this atom would make it go over the max line length.
reflowed_lines.add_line_break(continued_indent)
else:
reflowed_lines.add_space_if_needed(unicode(self))
reflowed_lines.add(self, len(continued_indent),
break_after_open_bracket)
def emit(self):
return self.__repr__()
@property
def is_keyword(self):
return keyword.iskeyword(self._atom.token_string)
@property
def is_string(self):
return self._atom.token_type == tokenize.STRING
@property
def is_name(self):
return self._atom.token_type == tokenize.NAME
@property
def is_number(self):
return self._atom.token_type == tokenize.NUMBER
@property
def is_comma(self):
return self._atom.token_string == ','
@property
def is_colon(self):
return self._atom.token_string == ':'
@property
def size(self):
return len(self._atom.token_string)
class Container(object):
"""Base class for all container types."""
def __init__(self, items):
self._items = items
def __repr__(self):
string = ''
last_was_keyword = False
for item in self._items:
if item.is_comma:
string += ', '
elif item.is_colon:
string += ': '
else:
item_string = unicode(item)
if (
string and
(last_was_keyword or
(not string.endswith(tuple('([{,.:}]) ')) and
not item_string.startswith(tuple('([{,.:}])'))))
):
string += ' '
string += item_string
last_was_keyword = item.is_keyword
return string
def __iter__(self):
for element in self._items:
yield element
def __getitem__(self, idx):
return self._items[idx]
def reflow(self, reflowed_lines, continued_indent,
break_after_open_bracket=False):
last_was_container = False
for (index, item) in enumerate(self._items):
next_item = get_item(self._items, index + 1)
if isinstance(item, Atom):
is_list_comp_or_if_expr = (
isinstance(self, (ListComprehension, IfExpression)))
item.reflow(reflowed_lines, continued_indent,
self._get_extent(index),
is_list_comp_or_if_expr=is_list_comp_or_if_expr,
next_is_dot=(next_item and
unicode(next_item) == '.'))
if last_was_container and item.is_comma:
reflowed_lines.add_line_break(continued_indent)
last_was_container = False
else: # isinstance(item, Container)
reflowed_lines.add(item, len(continued_indent),
break_after_open_bracket)
last_was_container = not isinstance(item, (ListComprehension,
IfExpression))
if (
break_after_open_bracket and index == 0 and
# Prefer to keep empty containers together instead of
# separating them.
unicode(item) == self.open_bracket and
(not next_item or unicode(next_item) != self.close_bracket) and
(len(self._items) != 3 or not isinstance(next_item, Atom))
):
reflowed_lines.add_line_break(continued_indent)
break_after_open_bracket = False
else:
next_next_item = get_item(self._items, index + 2)
if (
unicode(item) not in ['.', '%', 'in'] and
next_item and not isinstance(next_item, Container) and
unicode(next_item) != ':' and
next_next_item and (not isinstance(next_next_item, Atom) or
unicode(next_item) == 'not') and
not reflowed_lines.line_empty() and
not reflowed_lines.fits_on_current_line(
self._get_extent(index + 1) + 2)
):
reflowed_lines.add_line_break(continued_indent)
def _get_extent(self, index):
"""The extent of the full element.
E.g., the length of a function call or keyword.
"""
extent = 0
prev_item = get_item(self._items, index - 1)
seen_dot = prev_item and unicode(prev_item) == '.'
while index < len(self._items):
item = get_item(self._items, index)
index += 1
if isinstance(item, (ListComprehension, IfExpression)):
break
if isinstance(item, Container):
if prev_item and prev_item.is_name:
if seen_dot:
extent += 1
else:
extent += item.size
prev_item = item
continue
elif (unicode(item) not in ['.', '=', ':', 'not'] and
not item.is_name and not item.is_string):
break
if unicode(item) == '.':
seen_dot = True
extent += item.size
prev_item = item
return extent
@property
def is_string(self):
return False
@property
def size(self):
return len(self.__repr__())
@property
def is_keyword(self):
return False
@property
def is_name(self):
return False
@property
def is_comma(self):
return False
@property
def is_colon(self):
return False
@property
def open_bracket(self):
return None
@property
def close_bracket(self):
return None
class Tuple(Container):
"""A high-level representation of a tuple."""
@property
def open_bracket(self):
return '('
@property
def close_bracket(self):
return ')'
class List(Container):
"""A high-level representation of a list."""
@property
def open_bracket(self):
return '['
@property
def close_bracket(self):
return ']'
class DictOrSet(Container):
"""A high-level representation of a dictionary or set."""
@property
def open_bracket(self):
return '{'
@property
def close_bracket(self):
return '}'
class ListComprehension(Container):
"""A high-level representation of a list comprehension."""
@property
def size(self):
length = 0
for item in self._items:
if isinstance(item, IfExpression):
break
length += item.size
return length
class IfExpression(Container):
"""A high-level representation of an if-expression."""
def _parse_container(tokens, index, for_or_if=None):
"""Parse a high-level container, such as a list, tuple, etc."""
# Store the opening bracket.
items = [Atom(Token(*tokens[index]))]
index += 1
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
if tok.token_string in ',)]}':
# First check if we're at the end of a list comprehension or
# if-expression. Don't add the ending token as part of the list
# comprehension or if-expression, because they aren't part of those
# constructs.
if for_or_if == 'for':
return (ListComprehension(items), index - 1)
elif for_or_if == 'if':
return (IfExpression(items), index - 1)
# We've reached the end of a container.
items.append(Atom(tok))
            # Determine which kind of container has just been closed.
if tok.token_string == ')':
# The end of a tuple.
return (Tuple(items), index)
elif tok.token_string == ']':
# The end of a list.
return (List(items), index)
elif tok.token_string == '}':
# The end of a dictionary or set.
return (DictOrSet(items), index)
elif tok.token_string in '([{':
# A sub-container is being defined.
(container, index) = _parse_container(tokens, index)
items.append(container)
elif tok.token_string == 'for':
(container, index) = _parse_container(tokens, index, 'for')
items.append(container)
elif tok.token_string == 'if':
(container, index) = _parse_container(tokens, index, 'if')
items.append(container)
else:
items.append(Atom(tok))
index += 1
return (None, None)
def _parse_tokens(tokens):
"""Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
"""
index = 0
parsed_tokens = []
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
assert tok.token_type != token.INDENT
if tok.token_type == tokenize.NEWLINE:
# There's only one newline and it's at the end.
break
if tok.token_string in '([{':
(container, index) = _parse_container(tokens, index)
if not container:
return None
parsed_tokens.append(container)
else:
parsed_tokens.append(Atom(tok))
index += 1
return parsed_tokens
def _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line):
"""Reflow the lines so that it looks nice."""
if unicode(parsed_tokens[0]) == 'def':
# A function definition gets indented a bit more.
continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
else:
continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
break_after_open_bracket = not start_on_prefix_line
lines = ReformattedLines(max_line_length)
lines.add_indent(len(indentation.lstrip('\r\n')))
if not start_on_prefix_line:
# If splitting after the opening bracket will cause the first element
# to be aligned weirdly, don't try it.
first_token = get_item(parsed_tokens, 0)
second_token = get_item(parsed_tokens, 1)
if (
first_token and second_token and
unicode(second_token)[0] == '(' and
len(indentation) + len(first_token) + 1 == len(continued_indent)
):
return None
for item in parsed_tokens:
lines.add_space_if_needed(unicode(item), equal=True)
save_continued_indent = continued_indent
if start_on_prefix_line and isinstance(item, Container):
start_on_prefix_line = False
continued_indent = ' ' * (lines.current_size() + 1)
item.reflow(lines, continued_indent, break_after_open_bracket)
continued_indent = save_continued_indent
return lines.emit()
def _shorten_line_at_tokens_new(tokens, source, indentation,
max_line_length):
"""Shorten the line taking its length into account.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
# Yield the original source so to see if it's a better choice than the
# shortened candidate lines we generate here.
yield indentation + source
parsed_tokens = _parse_tokens(tokens)
if parsed_tokens:
# Perform two reflows. The first one starts on the same line as the
# prefix. The second starts on the line after the prefix.
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=True)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=False)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
key_token_strings, aggressive):
"""Separate line by breaking at tokens in key_token_strings.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
offsets = []
for (index, _t) in enumerate(token_offsets(tokens)):
(token_type,
token_string,
start_offset,
end_offset) = _t
assert token_type != token.INDENT
if token_string in key_token_strings:
# Do not break in containers with zero or one items.
unwanted_next_token = {
'(': ')',
'[': ']',
'{': '}'}.get(token_string)
if unwanted_next_token:
if (
get_item(tokens,
index + 1,
default=[None, None])[1] == unwanted_next_token or
get_item(tokens,
index + 2,
default=[None, None])[1] == unwanted_next_token
):
continue
if (
index > 2 and token_string == '(' and
tokens[index - 1][1] in ',(%['
):
# Don't split after a tuple start, or before a tuple start if
# the tuple is in a list.
continue
if end_offset < len(source) - 1:
# Don't split right before newline.
offsets.append(end_offset)
else:
# Break at adjacent strings. These were probably meant to be on
# separate lines in the first place.
previous_token = get_item(tokens, index - 1)
if (
token_type == tokenize.STRING and
previous_token and previous_token[0] == tokenize.STRING
):
offsets.append(start_offset)
current_indent = None
fixed = None
for line in split_at_offsets(source, offsets):
if fixed:
fixed += '\n' + current_indent + line
for symbol in '([{':
if line.endswith(symbol):
current_indent += indent_word
else:
# First line.
fixed = line
assert not current_indent
current_indent = indent_word
assert fixed is not None
if check_syntax(normalize_multiline(fixed)
if aggressive > 1 else fixed):
return indentation + fixed
return None
def token_offsets(tokens):
"""Yield tokens and offsets."""
end_offset = 0
previous_end_row = 0
previous_end_column = 0
for t in tokens:
token_type = t[0]
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
# Account for the whitespace between tokens.
end_offset += start_column
if previous_end_row == start_row:
end_offset -= previous_end_column
# Record the start offset of the token.
start_offset = end_offset
# Account for the length of the token itself.
end_offset += len(token_string)
yield (token_type,
token_string,
start_offset,
end_offset)
previous_end_row = end_row
previous_end_column = end_column
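# Illustrative behaviour of token_offsets(): for tokens generated from the
# one-line source 'x = 1', the yielded (token_string, start, end) values are
# ('x', 0, 1), ('=', 2, 3) and ('1', 4, 5); both offsets index directly into
# the source string.  (The token type is also yielded but omitted here.)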
def normalize_multiline(line):
"""Normalize multiline-related code that will cause syntax error.
This is for purposes of checking syntax.
"""
if line.startswith('def ') and line.rstrip().endswith(':'):
return line + ' pass'
elif line.startswith('return '):
return 'def _(): ' + line
elif line.startswith('@'):
return line + 'def _(): pass'
elif line.startswith('class '):
return line + ' pass'
elif line.startswith(('if ', 'elif ', 'for ', 'while ')):
return line + ' pass'
return line
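# Illustrative behaviour of normalize_multiline():
#   normalize_multiline('def foo():') -> 'def foo(): pass'
#   normalize_multiline('return x')   -> 'def _(): return x'
#   normalize_multiline('x = 1')      -> 'x = 1'  (unchanged)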
def fix_whitespace(line, offset, replacement):
"""Replace whitespace at offset and return fixed line."""
# Replace escaped newlines too
left = line[:offset].rstrip('\n\r \t\\')
right = line[offset:].lstrip('\n\r \t\\')
if right.startswith('#'):
return line
return left + replacement + right
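# Illustrative behaviour of fix_whitespace(): the whitespace surrounding the
# given offset is collapsed into the replacement, e.g.
#   fix_whitespace('x  = 1', offset=1, replacement=' ') -> 'x = 1'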
def _execute_pep8(pep8_options, source):
"""Execute pycodestyle via python method calls."""
class QuietReport(pycodestyle.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, check):
"""Collect errors."""
code = super(QuietReport, self).error(line_number,
offset,
text,
check)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pycodestyle.Checker('', lines=source, reporter=QuietReport,
**pep8_options)
checker.check_all()
return checker.report.full_error_results()
def _remove_leading_and_normalize(line):
# ignore FF in first lstrip()
return line.lstrip(' \t\v').rstrip(CR + LF) + '\n'
class Reindenter(object):
"""Reindents badly-indented code to uniformly use four-space indentation.
Released to the public domain, by Tim Peters, 03 October 2000.
"""
def __init__(self, input_text):
sio = io.StringIO(input_text)
source_lines = sio.readlines()
self.string_content_line_numbers = multiline_string_lines(input_text)
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it is a newline.
self.lines = []
for line_number, line in enumerate(source_lines, start=1):
# Do not modify if inside a multiline string.
if line_number in self.string_content_line_numbers:
self.lines.append(line)
else:
# Only expand leading tabs.
self.lines.append(_get_indentation(line).expandtabs() +
_remove_leading_and_normalize(line))
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
self.input_text = input_text
def run(self, indent_size=DEFAULT_INDENT_SIZE):
"""Fix indentation and return modified line numbers.
Line numbers are indexed at 1.
"""
if indent_size < 1:
return self.input_text
try:
stats = _reindent_stats(tokenize.generate_tokens(self.getline))
except (SyntaxError, tokenize.TokenError):
return self.input_text
# Remove trailing empty lines.
lines = self.lines
# Sentinel.
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i + 1][0]
have = _leading_space_count(lines[thisstmt])
want = thislevel * indent_size
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == _leading_space_count(lines[jline]):
want = jlevel * indent_size
break
if want < 0: # Maybe it's a hanging
# comment like this one,
                            # in which case we should shift it like its base
                            # line got shifted.
for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line_number, line in enumerate(lines[thisstmt:nextstmt],
start=thisstmt):
if line_number in self.string_content_line_numbers:
after.append(line)
elif diff > 0:
if line == '\n':
after.append(line)
else:
after.append(' ' * diff + line)
else:
remove = min(_leading_space_count(line), -diff)
after.append(line[remove:])
return ''.join(after)
def getline(self):
"""Line-getter for tokenize."""
if self.index >= len(self.lines):
line = ''
else:
line = self.lines[self.index]
self.index += 1
return line
def _reindent_stats(tokens):
"""Return list of (lineno, indentlevel) pairs.
One for each stmt and comment line. indentlevel is -1 for comment
lines, as a signal that tokenize doesn't know what to do about them;
indeed, they're our headache!
"""
find_stmt = 1 # Next token begins a fresh stmt?
level = 0 # Current indent level.
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = 1
elif token_type == tokenize.INDENT:
find_stmt = 1
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = 1
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# But we're still looking for a new stmt, so leave
# find_stmt alone.
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = 0
if line: # Not endmarker.
stats.append((sline, level))
return stats
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
def refactor_with_2to3(source_text, fixer_names, filename=''):
"""Use lib2to3 to refactor the source.
Return the refactored source code.
"""
from lib2to3.refactor import RefactoringTool
fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
tool = RefactoringTool(fixer_names=fixers, explicit=fixers)
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
try:
# The name parameter is necessary particularly for the "import" fixer.
return unicode(tool.refactor_string(source_text, name=filename))
except lib2to3_tokenize.TokenError:
return source_text
def check_syntax(code):
"""Return True if syntax is okay."""
try:
return compile(code, '<string>', 'exec', dont_inherit=True)
except (SyntaxError, TypeError, ValueError):
return False
def filter_results(source, results, aggressive):
"""Filter out spurious reports from pycodestyle.
If aggressive is True, we allow possibly unsafe fixes (E711, E712).
"""
non_docstring_string_line_numbers = multiline_string_lines(
source, include_docstrings=False)
all_string_line_numbers = multiline_string_lines(
source, include_docstrings=True)
commented_out_code_line_numbers = commented_out_code_lines(source)
has_e901 = any(result['id'].lower() == 'e901' for result in results)
for r in results:
issue_id = r['id'].lower()
if r['line'] in non_docstring_string_line_numbers:
if issue_id.startswith(('e1', 'e501', 'w191')):
continue
if r['line'] in all_string_line_numbers:
if issue_id in ['e501']:
continue
# We must offset by 1 for lines that contain the trailing contents of
# multiline strings.
if not aggressive and (r['line'] + 1) in all_string_line_numbers:
# Do not modify multiline strings in non-aggressive mode. Remove
# trailing whitespace could break doctests.
if issue_id.startswith(('w29', 'w39')):
continue
if aggressive <= 0:
if issue_id.startswith(('e711', 'e72', 'w6')):
continue
if aggressive <= 1:
if issue_id.startswith(('e712', 'e713', 'e714', 'w5')):
continue
if aggressive <= 2:
if issue_id.startswith(('e704', 'w5')):
continue
if r['line'] in commented_out_code_line_numbers:
if issue_id.startswith(('e26', 'e501')):
continue
# Do not touch indentation if there is a token error caused by
# incomplete multi-line statement. Otherwise, we risk screwing up the
# indentation.
if has_e901:
if issue_id.startswith(('e1', 'e7')):
continue
yield r
def multiline_string_lines(source, include_docstrings=False):
"""Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
    Docstrings are ignored unless include_docstrings is True.
"""
line_numbers = set()
previous_token_type = ''
try:
for t in generate_tokens(source):
token_type = t[0]
start_row = t[2][0]
end_row = t[3][0]
if token_type == tokenize.STRING and start_row != end_row:
if (
include_docstrings or
previous_token_type != tokenize.INDENT
):
# We increment by one since we want the contents of the
# string.
line_numbers |= set(range(1 + start_row, 1 + end_row))
previous_token_type = token_type
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
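# Illustrative behaviour of multiline_string_lines(): for the source
# 'x = """\nhello\n"""\n' the triple-quoted string spans lines 1-3 and the
# function returns {2, 3}, the lines holding the string's contents.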
def commented_out_code_lines(source):
"""Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even
more clutter.
"""
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
# Ignore inline comments.
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
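# Illustrative behaviour of commented_out_code_lines(): for the source
# '# x = 1\n# hello world\nprint(1)\n' only line 1 is reported, because
# 'x = 1' parses as code while 'hello world' does not.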
def shorten_comment(line, max_line_length, last_comment=False):
"""Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
"""
assert len(line) > max_line_length
line = line.rstrip()
# PEP 8 recommends 72 characters for comment text.
indentation = _get_indentation(line) + '# '
max_line_length = min(max_line_length,
len(indentation) + 72)
MIN_CHARACTER_REPEAT = 5
if (
len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
not line[-1].isalnum()
):
# Trim comments that end with things like ---------
return line[:max_line_length] + '\n'
elif last_comment and re.match(r'\s*#+\s*\w+', line):
split_lines = textwrap.wrap(line.lstrip(' \t#'),
initial_indent=indentation,
subsequent_indent=indentation,
width=max_line_length,
break_long_words=False,
break_on_hyphens=False)
return '\n'.join(split_lines) + '\n'
return line + '\n'
def normalize_line_endings(lines, newline):
"""Return fixed line endings.
All lines will be modified to use the most common line ending.
"""
return [line.rstrip('\n\r') + newline for line in lines]
def mutual_startswith(a, b):
return b.startswith(a) or a.startswith(b)
def code_match(code, select, ignore):
if ignore:
assert not isinstance(ignore, unicode)
for ignored_code in [c.strip() for c in ignore]:
if mutual_startswith(code.lower(), ignored_code.lower()):
return False
if select:
assert not isinstance(select, unicode)
for selected_code in [c.strip() for c in select]:
if mutual_startswith(code.lower(), selected_code.lower()):
return True
return False
return True
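# Illustrative behaviour of code_match():
#   code_match('E711', select=['E7'], ignore=[]) -> True
#   code_match('W601', select=['E7'], ignore=[]) -> False
#   code_match('E501', select=[], ignore=['E5']) -> False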
def fix_code(source, options=None, encoding=None, apply_config=False):
"""Return fixed source code.
"encoding" will be used to decode "source" if it is a byte string.
"""
options = _get_options(options, apply_config)
if not isinstance(source, unicode):
source = source.decode(encoding or get_encoding())
sio = io.StringIO(source)
return fix_lines(sio.readlines(), options=options)
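# Typical use of the public fix_code() API (illustrative):
#   import autopep8
#   autopep8.fix_code('x=       123\n')  ->  'x = 123\n'
# An options dict may also be passed, e.g. options={'aggressive': 1}.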
def _get_options(raw_options, apply_config):
"""Return parsed options."""
if not raw_options:
return parse_args([''], apply_config=apply_config)
if isinstance(raw_options, dict):
options = parse_args([''], apply_config=apply_config)
for name, value in raw_options.items():
if not hasattr(options, name):
raise ValueError("No such option '{}'".format(name))
# Check for very basic type errors.
expected_type = type(getattr(options, name))
if not isinstance(expected_type, (str, unicode)):
if isinstance(value, (str, unicode)):
raise ValueError(
"Option '{}' should not be a string".format(name))
setattr(options, name, value)
else:
options = raw_options
return options
def fix_lines(source_lines, options, filename=''):
"""Return fixed source code."""
# Transform everything to line feed. Then change them back to original
# before returning fixed source code.
original_newline = find_newline(source_lines)
tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
# Keep a history to break out of cycles.
previous_hashes = set()
if options.line_range:
# Disable "apply_local_fixes()" for now due to issue #175.
fixed_source = tmp_source
else:
pep8_options = {
'ignore': options.ignore,
'select': options.select,
'max_line_length': options.max_line_length,
'hang_closing': options.hang_closing,
}
sio = io.StringIO(tmp_source)
contents = sio.readlines()
results = _execute_pep8(pep8_options, contents)
codes = {result['id'] for result in results
if result['id'] in SELECTED_GLOBAL_FIXED_METHOD_CODES}
# Apply global fixes only once (for efficiency).
fixed_source = apply_global_fixes(tmp_source,
options,
filename=filename,
codes=codes)
passes = 0
long_line_ignore_cache = set()
while hash(fixed_source) not in previous_hashes:
if options.pep8_passes >= 0 and passes > options.pep8_passes:
break
passes += 1
previous_hashes.add(hash(fixed_source))
tmp_source = copy.copy(fixed_source)
fix = FixPEP8(
filename,
options,
contents=tmp_source,
long_line_ignore_cache=long_line_ignore_cache)
fixed_source = fix.fix()
sio = io.StringIO(fixed_source)
return ''.join(normalize_line_endings(sio.readlines(), original_newline))
def fix_file(filename, options=None, output=None, apply_config=False):
if not options:
options = parse_args([filename], apply_config=apply_config)
original_source = readlines_from_file(filename)
fixed_source = original_source
if options.in_place or output:
encoding = detect_encoding(filename)
if output:
output = LineEndingWrapper(wrap_output(output, encoding=encoding))
fixed_source = fix_lines(fixed_source, options, filename=filename)
if options.diff:
new = io.StringIO(fixed_source)
new = new.readlines()
diff = get_diff_text(original_source, new, filename)
if output:
output.write(diff)
output.flush()
else:
return diff
elif options.in_place:
fp = open_with_encoding(filename, encoding=encoding, mode='w')
fp.write(fixed_source)
fp.close()
else:
if output:
output.write(fixed_source)
output.flush()
else:
return fixed_source
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in list(globals().values()):
if inspect.isfunction(function):
arguments = _get_parameters(function)
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function)
def _get_parameters(function):
# pylint: disable=deprecated-method
if sys.version_info.major >= 3:
# We need to match "getargspec()", which includes "self" as the first
# value for methods.
# https://bugs.python.org/issue17481#msg209469
if inspect.ismethod(function):
function = function.__func__
return list(inspect.signature(function).parameters)
else:
return inspect.getargspec(function)[0]
def apply_global_fixes(source, options, where='global', filename='',
codes=None):
"""Run global fixes on source code.
These are fixes that only need be done once (unlike those in
FixPEP8, which are dependent on pycodestyle).
"""
if codes is None:
codes = []
if any(code_match(code, select=options.select, ignore=options.ignore)
for code in ['E101', 'E111']):
source = reindent(source,
indent_size=options.indent_size)
for (code, function) in global_fixes():
if code.upper() in SELECTED_GLOBAL_FIXED_METHOD_CODES \
and code.upper() not in codes:
continue
if code_match(code, select=options.select, ignore=options.ignore):
if options.verbose:
print('---> Applying {} fix for {}'.format(where,
code.upper()),
file=sys.stderr)
source = function(source,
aggressive=options.aggressive)
source = fix_2to3(source,
aggressive=options.aggressive,
select=options.select,
ignore=options.ignore,
filename=filename,
where=where,
verbose=options.verbose)
return source
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code
def _get_package_version():
packages = ["pycodestyle: {}".format(pycodestyle.__version__)]
return ", ".join(packages)
def create_parser():
"""Return command-line parser."""
parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
prog='autopep8')
parser.add_argument('--version', action='version',
version='%(prog)s {} ({})'.format(
__version__, _get_package_version()))
parser.add_argument('-v', '--verbose', action='count',
default=0,
help='print verbose messages; '
'multiple -v result in more verbose messages')
parser.add_argument('-d', '--diff', action='store_true',
help='print the diff for the fixed source')
parser.add_argument('-i', '--in-place', action='store_true',
help='make changes to files in place')
parser.add_argument('--global-config', metavar='filename',
default=DEFAULT_CONFIG,
help='path to a global pep8 config file; if this file '
'does not exist then this is ignored '
'(default: {})'.format(DEFAULT_CONFIG))
parser.add_argument('--ignore-local-config', action='store_true',
help="don't look for and apply local config files; "
'if not passed, defaults are updated with any '
"config files in the project's root directory")
parser.add_argument('-r', '--recursive', action='store_true',
help='run recursively over directories; '
'must be used with --in-place or --diff')
parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
help='number of parallel jobs; '
'match CPU count if value is less than 1')
parser.add_argument('-p', '--pep8-passes', metavar='n',
default=-1, type=int,
help='maximum number of additional pep8 passes '
'(default: infinite)')
parser.add_argument('-a', '--aggressive', action='count', default=0,
help='enable non-whitespace changes; '
'multiple -a result in more aggressive changes')
parser.add_argument('--experimental', action='store_true',
help='enable experimental fixes')
parser.add_argument('--exclude', metavar='globs',
help='exclude file/directory names that match these '
'comma-separated globs')
parser.add_argument('--list-fixes', action='store_true',
help='list codes for fixes; '
'used by --ignore and --select')
parser.add_argument('--ignore', metavar='errors', default='',
help='do not fix these errors/warnings '
'(default: {})'.format(DEFAULT_IGNORE))
parser.add_argument('--select', metavar='errors', default='',
help='fix only these errors/warnings (e.g. E4,W)')
parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
help='set maximum allowed line length '
'(default: %(default)s)')
parser.add_argument('--line-range', '--range', metavar='line',
default=None, type=int, nargs=2,
help='only fix errors found within this inclusive '
'range of line numbers (e.g. 1 99); '
'line numbers are indexed at 1')
parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
type=int, help=argparse.SUPPRESS)
parser.add_argument('--hang-closing', action='store_true',
help='hang-closing option passed to pycodestyle')
parser.add_argument('files', nargs='*',
help="files to format or '-' for standard in")
return parser
def parse_args(arguments, apply_config=False):
"""Parse command-line options."""
parser = create_parser()
args = parser.parse_args(arguments)
if not args.files and not args.list_fixes:
parser.error('incorrect number of arguments')
args.files = [decode_filename(name) for name in args.files]
if apply_config:
parser = read_config(args, parser)
args = parser.parse_args(arguments)
args.files = [decode_filename(name) for name in args.files]
if '-' in args.files:
if len(args.files) > 1:
parser.error('cannot mix stdin and regular files')
if args.diff:
parser.error('--diff cannot be used with standard input')
if args.in_place:
parser.error('--in-place cannot be used with standard input')
if args.recursive:
parser.error('--recursive cannot be used with standard input')
if len(args.files) > 1 and not (args.in_place or args.diff):
parser.error('autopep8 only takes one filename as argument '
'unless the "--in-place" or "--diff" args are '
'used')
if args.recursive and not (args.in_place or args.diff):
parser.error('--recursive must be used with --in-place or --diff')
if args.in_place and args.diff:
parser.error('--in-place and --diff are mutually exclusive')
if args.max_line_length <= 0:
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = _split_comma_separated(args.select)
if args.ignore:
args.ignore = _split_comma_separated(args.ignore)
elif not args.select:
if args.aggressive:
# Enable everything by default if aggressive.
args.select = {'E', 'W'}
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude)
else:
args.exclude = {}
if args.jobs < 1:
# Do not import multiprocessing globally in case it is not supported
# on the platform.
import multiprocessing
args.jobs = multiprocessing.cpu_count()
if args.jobs > 1 and not args.in_place:
parser.error('parallel jobs requires --in-place')
if args.line_range:
if args.line_range[0] <= 0:
parser.error('--range must be positive numbers')
if args.line_range[0] > args.line_range[1]:
parser.error('First value of --range should be less than or equal '
'to the second')
return args
def read_config(args, parser):
"""Read both user configuration and local configuration."""
try:
from configparser import ConfigParser as SafeConfigParser
from configparser import Error
except ImportError:
from ConfigParser import SafeConfigParser
from ConfigParser import Error
config = SafeConfigParser()
try:
config.read(args.global_config)
if not args.ignore_local_config:
parent = tail = args.files and os.path.abspath(
os.path.commonprefix(args.files))
while tail:
if config.read([os.path.join(parent, fn)
for fn in PROJECT_CONFIG]):
break
(parent, tail) = os.path.split(parent)
defaults = {}
option_list = {o.dest: o.type or type(o.default)
for o in parser._actions}
for section in ['pep8', 'pycodestyle', 'flake8']:
if not config.has_section(section):
continue
for (k, _) in config.items(section):
norm_opt = k.lstrip('-').replace('-', '_')
if not option_list.get(norm_opt):
continue
opt_type = option_list[norm_opt]
if opt_type is int:
value = config.getint(section, k)
elif opt_type is bool:
value = config.getboolean(section, k)
else:
value = config.get(section, k)
if args.verbose:
print("enable config: section={}, key={}, value={}".format(
section, k, value))
defaults[norm_opt] = value
parser.set_defaults(**defaults)
except Error:
# Ignore for now.
pass
return parser
def _split_comma_separated(string):
"""Return a set of strings."""
return {text.strip() for text in string.split(',') if text.strip()}
def decode_filename(filename):
"""Return Unicode filename."""
if isinstance(filename, unicode):
return filename
return filename.decode(sys.getfilesystemencoding())
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
def docstring_summary(docstring):
"""Return summary of docstring."""
return docstring.split('\n')[0] if docstring else ''
def line_shortening_rank(candidate, indent_word, max_line_length,
experimental=False):
"""Return rank of candidate.
This is for sorting candidates.
"""
if not candidate.strip():
return 0
rank = 0
lines = candidate.rstrip().split('\n')
offset = 0
if (
not lines[0].lstrip().startswith('#') and
lines[0].rstrip()[-1] not in '([{'
):
for (opening, closing) in ('()', '[]', '{}'):
# Don't penalize empty containers that aren't split up. Things like
# this "foo(\n )" aren't particularly good.
opening_loc = lines[0].find(opening)
closing_loc = lines[0].find(closing)
if opening_loc >= 0:
if closing_loc < 0 or closing_loc != opening_loc + 1:
offset = max(offset, 1 + opening_loc)
current_longest = max(offset + len(x.strip()) for x in lines)
rank += 4 * max(0, current_longest - max_line_length)
rank += len(lines)
# Too much variation in line length is ugly.
rank += 2 * standard_deviation(len(line) for line in lines)
    bad_starting_symbol = {
        '(': ')',
        '[': ']',
        '{': '}'}.get(lines[0][-1])
    if len(lines) > 1:
        if (
            bad_starting_symbol and
            lines[1].lstrip().startswith(bad_starting_symbol)
        ):
rank += 20
for lineno, current_line in enumerate(lines):
current_line = current_line.strip()
if current_line.startswith('#'):
continue
for bad_start in ['.', '%', '+', '-', '/']:
if current_line.startswith(bad_start):
rank += 100
# Do not tolerate operators on their own line.
if current_line == bad_start:
rank += 1000
if (
current_line.endswith(('.', '%', '+', '-', '/')) and
"': " in current_line
):
rank += 1000
if current_line.endswith(('(', '[', '{', '.')):
# Avoid lonely opening. They result in longer lines.
if len(current_line) <= len(indent_word):
rank += 100
# Avoid the ugliness of ", (\n".
if (
current_line.endswith('(') and
current_line[:-1].rstrip().endswith(',')
):
rank += 100
# Avoid the ugliness of "something[\n" and something[index][\n.
if (
current_line.endswith('[') and
len(current_line) > 1 and
(current_line[-2].isalnum() or current_line[-2] in ']')
):
rank += 300
# Also avoid the ugliness of "foo.\nbar"
if current_line.endswith('.'):
rank += 100
if has_arithmetic_operator(current_line):
rank += 100
# Avoid breaking at unary operators.
if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')):
rank += 1000
if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')):
rank += 1000
if current_line.endswith(('%', '(', '[', '{')):
rank -= 20
# Try to break list comprehensions at the "for".
if current_line.startswith('for '):
rank -= 50
if current_line.endswith('\\'):
# If a line ends in \-newline, it may be part of a
# multiline string. In that case, we would like to know
# how long that line is without the \-newline. If it's
# longer than the maximum, or has comments, then we assume
# that the \-newline is an okay candidate and only
# penalize it a bit.
total_len = len(current_line)
lineno += 1
while lineno < len(lines):
total_len += len(lines[lineno])
if lines[lineno].lstrip().startswith('#'):
total_len = max_line_length
break
if not lines[lineno].endswith('\\'):
break
lineno += 1
if total_len < max_line_length:
rank += 10
else:
rank += 100 if experimental else 1
# Prefer breaking at commas rather than colon.
if ',' in current_line and current_line.endswith(':'):
rank += 10
# Avoid splitting dictionaries between key and value.
if current_line.endswith(':'):
rank += 100
rank += 10 * count_unbalanced_brackets(current_line)
return max(0, rank)
def standard_deviation(numbers):
"""Return standard deviation."""
numbers = list(numbers)
if not numbers:
return 0
mean = sum(numbers) / len(numbers)
return (sum((n - mean) ** 2 for n in numbers) /
len(numbers)) ** .5
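# Illustrative behaviour of standard_deviation() (population form):
#   standard_deviation([2, 4, 4, 4, 5, 5, 7, 9]) -> 2.0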
def has_arithmetic_operator(line):
"""Return True if line contains any arithmetic operators."""
for operator in pycodestyle.ARITHMETIC_OP:
if operator in line:
return True
return False
def count_unbalanced_brackets(line):
"""Return number of unmatched open/close brackets."""
count = 0
for opening, closing in ['()', '[]', '{}']:
count += abs(line.count(opening) - line.count(closing))
return count
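# Illustrative behaviour of count_unbalanced_brackets():
#   count_unbalanced_brackets('foo(bar[1')   -> 2
#   count_unbalanced_brackets('foo(bar[1])') -> 0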
def split_at_offsets(line, offsets):
"""Split line at offsets.
Return list of strings.
"""
result = []
previous_offset = 0
current_offset = 0
for current_offset in sorted(offsets):
if current_offset < len(line) and previous_offset != current_offset:
result.append(line[previous_offset:current_offset].strip())
previous_offset = current_offset
result.append(line[current_offset:])
return result
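# Illustrative behaviour of split_at_offsets():
#   split_at_offsets('abcdef', [2, 4]) -> ['ab', 'cd', 'ef']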
class LineEndingWrapper(object):
r"""Replace line endings to work with sys.stdout.
It seems that sys.stdout expects only '\n' as the line ending, no matter
the platform. Otherwise, we get repeated line endings.
"""
def __init__(self, output):
self.__output = output
def write(self, s):
self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))
def flush(self):
self.__output.flush()
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if fnmatch.fnmatch(filename, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True
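# Editor's note -- illustrative behaviour of match_file() (not part of the
# original module): hidden files are always rejected and every exclude pattern
# is tested against both the base name and the full path, e.g.
#     match_file('.hidden.py', [])                  -> False
#     match_file('pkg/module.py', ['test_*'])       -> True
#     match_file('pkg/test_module.py', ['test_*'])  -> False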
def find_files(filenames, recursive, exclude):
"""Yield filenames."""
while filenames:
name = filenames.pop(0)
if recursive and os.path.isdir(name):
for root, directories, children in os.walk(name):
filenames += [os.path.join(root, f) for f in children
if match_file(os.path.join(root, f),
exclude)]
directories[:] = [d for d in directories
if match_file(os.path.join(root, d),
exclude)]
else:
yield name
def _fix_file(parameters):
"""Helper function for optionally running fix_file() in parallel."""
if parameters[1].verbose:
print('[file:{}]'.format(parameters[0]), file=sys.stderr)
try:
fix_file(*parameters)
except IOError as error:
print(unicode(error), file=sys.stderr)
def fix_multiple_files(filenames, options, output=None):
"""Fix list of files.
Optionally fix files recursively.
"""
filenames = find_files(filenames, options.recursive, options.exclude)
if options.jobs > 1:
import multiprocessing
pool = multiprocessing.Pool(options.jobs)
pool.map(_fix_file,
[(name, options) for name in filenames])
else:
for name in filenames:
_fix_file((name, options, output))
def is_python_file(filename):
"""Return True if filename is Python file."""
if filename.endswith('.py'):
return True
try:
with open_with_encoding(
filename,
limit_byte_check=MAX_PYTHON_FILE_DETECTION_BYTES) as f:
text = f.read(MAX_PYTHON_FILE_DETECTION_BYTES)
if not text:
return False
first_line = text.splitlines()[0]
except (IOError, IndexError):
return False
if not PYTHON_SHEBANG_REGEX.match(first_line):
return False
return True
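# Editor's note -- rough behaviour of is_python_file() (not part of the original
# module): a '.py' suffix is accepted immediately; otherwise only a small prefix
# of the file is read and it counts as Python when the first line is a Python
# shebang (e.g. '#!/usr/bin/env python', assuming PYTHON_SHEBANG_REGEX accepts
# the usual forms). Unreadable or empty files yield False.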
def is_probably_part_of_multiline(line):
"""Return True if line is likely part of a multiline string.
When multiline strings are involved, pep8 reports the error as being
at the start of the multiline string, which doesn't work for us.
"""
return (
'"""' in line or
"'''" in line or
line.rstrip().endswith('\\')
)
def wrap_output(output, encoding):
"""Return output with specified encoding."""
return codecs.getwriter(encoding)(output.buffer
if hasattr(output, 'buffer')
else output)
def get_encoding():
"""Return preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def main(argv=None, apply_config=True):
"""Command-line entry."""
if argv is None:
argv = sys.argv
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
args = parse_args(argv[1:], apply_config=apply_config)
if args.list_fixes:
for code, description in sorted(supported_fixes()):
print('{code} - {description}'.format(
code=code, description=description))
return 0
if args.files == ['-']:
assert not args.in_place
encoding = sys.stdin.encoding or get_encoding()
# LineEndingWrapper is unnecessary here due to the symmetry between
# standard in and standard out.
wrap_output(sys.stdout, encoding=encoding).write(
fix_code(sys.stdin.read(), args, encoding=encoding))
else:
if args.in_place or args.diff:
args.files = list(set(args.files))
else:
assert len(args.files) == 1
assert not args.recursive
fix_multiple_files(args.files, args, sys.stdout)
except KeyboardInterrupt:
return 1 # pragma: no cover
class CachedTokenizer(object):
"""A one-element cache around tokenize.generate_tokens().
Original code written by Ned Batchelder, in coverage.py.
"""
def __init__(self):
self.last_text = None
self.last_tokens = None
def generate_tokens(self, text):
"""A stand-in for tokenize.generate_tokens()."""
if text != self.last_text:
string_io = io.StringIO(text)
self.last_tokens = list(
tokenize.generate_tokens(string_io.readline)
)
self.last_text = text
return self.last_tokens
_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens
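# Editor's note -- usage sketch for the cached tokenizer above (not part of the
# original module): generate_tokens() behaves like tokenize.generate_tokens()
# on a source string, but memoises the most recent text, so repeated calls with
# identical input return the very same token list:
#
#     tokens_a = generate_tokens('x = 1\n')
#     tokens_b = generate_tokens('x = 1\n')   # served from the one-element cache
#     assert tokens_a is tokens_b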
if __name__ == '__main__':
sys.exit(main())
| 33.759126
| 79
| 0.554124
|
164b0b6736683c348cfd7260abda065f9fd219eb
| 57,827
|
py
|
Python
|
pyDEA/core/gui_modules/table_gui.py
|
olga-perederieieva/pyDEA
|
eac02bac901b9109efb5d6a3841809f70e378912
|
[
"MIT"
] | 29
|
2017-10-22T03:03:20.000Z
|
2022-03-21T09:15:22.000Z
|
pyDEA/core/gui_modules/table_gui.py
|
olga-perederieieva/pyDEA
|
eac02bac901b9109efb5d6a3841809f70e378912
|
[
"MIT"
] | 6
|
2018-07-18T01:40:43.000Z
|
2021-04-11T00:38:30.000Z
|
pyDEA/core/gui_modules/table_gui.py
|
olga-perederieieva/pyDEA
|
eac02bac901b9109efb5d6a3841809f70e378912
|
[
"MIT"
] | 20
|
2018-01-23T05:50:29.000Z
|
2022-02-22T05:04:56.000Z
|
''' This module contains classes responsible for displaying input data
in a table (TableFrame and TableFrameWithInputOutputBox).
It also contains many classes necessary for TableFrameWithInputOutputBox.
Attributes:
    CELL_WIDTH (int): constant that defines the width of a cell in a table
'''
from tkinter import S, N, E, W, END, VERTICAL, HORIZONTAL, ALL
from tkinter import IntVar, DISABLED, StringVar, NORMAL
from tkinter.ttk import Frame, Entry, Scrollbar, Checkbutton
from pyDEA.core.gui_modules.scrollable_frame_gui import MouseWheel
from pyDEA.core.utils.dea_utils import is_valid_coeff, NOT_VALID_COEFF, VALID_COEFF
from pyDEA.core.utils.dea_utils import WARNING_COEFF, EMPTY_COEFF, CELL_DESTROY
from pyDEA.core.utils.dea_utils import CHANGE_CATEGORY_NAME, INPUT_OBSERVER
from pyDEA.core.utils.dea_utils import OUTPUT_OBSERVER, on_canvas_resize
from pyDEA.core.utils.dea_utils import validate_category_name, calculate_nb_pages
from pyDEA.core.gui_modules.custom_canvas_gui import StyledCanvas
from pyDEA.core.data_processing.read_data import convert_to_dictionary
CELL_WIDTH = 10
class TableFrame(Frame):
''' This class is a base class that defines minimal functionality of
a table.
Attributes:
parent (Tk object): parent of this widget.
nb_rows (int): number of rows of the table.
nb_cols (int): number of columns of the table.
cells (list of list of Entry): list with Entry widgets
(or derivatives of Entry)
that describes the table and its content.
canvas (Canvas): canvas that holds all widgets
(it is necessary to make the table scrollable).
frame_with_table (Frame): frame that holds all widgets.
Args:
parent (Tk object): parent of this widget.
nb_rows (int, optional): number of rows of the table,
defaults to 20.
nb_cols (int, optional): number of columns of the table,
defaults to 5.
'''
def __init__(self, parent, data, nb_rows=20, nb_cols=5):
Frame.__init__(self, parent)
self.data = data
self.parent = parent
self.nb_rows = nb_rows
self.nb_cols = nb_cols
self.cells = []
self.canvas = None
self.frame_with_table = None
self.create_widgets()
def create_widgets(self):
''' Creates all widgets.
'''
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
yScrollbar = Scrollbar(self, orient=VERTICAL)
yScrollbar.grid(row=0, column=1, sticky=N+S)
xScrollbar = Scrollbar(self, orient=HORIZONTAL)
xScrollbar.grid(row=1, column=0, sticky=E+W)
canvas = StyledCanvas(self, yscrollcommand=yScrollbar.set,
xscrollcommand=xScrollbar.set, bd=0)
self.canvas = canvas
canvas.grid(row=0, column=0, sticky=N+S+W+E)
frame_with_table = Frame(canvas)
self.frame_with_table = frame_with_table
frame_with_table.grid(sticky=N+S+W+E, pady=15, padx=3)
for i in range(2, self.nb_rows + 2):
cols = []
for j in range(1, self.nb_cols + 1):
ent = self.create_entry_widget(frame_with_table)
ent.grid(row=i, column=j, sticky=N+S+E+W)
cols.append(ent)
self.cells.append(cols)
canvas.create_window(0, 0, window=frame_with_table, anchor='nw')
canvas.update_idletasks()
yScrollbar['command'] = canvas.yview
xScrollbar['command'] = canvas.xview
self._update_scroll_region()
MouseWheel(self).add_scrolling(canvas, yscrollbar=yScrollbar)
def create_entry_widget(self, parent):
''' Creates Entry widget.
Args:
parent (Tk object): parent of the Entry widget.
Returns:
Entry: created Entry widget.
'''
return Entry(parent, width=CELL_WIDTH)
def add_row(self):
''' Adds one row to the end of the table.
'''
self.cells.append([])
for j in range(self.nb_cols):
grid_row_index = self.nb_rows + 2
ent = self.create_entry_widget(self.frame_with_table)
ent.grid(row=grid_row_index, column=j + 1, sticky=N+S+E+W)
self.cells[self.nb_rows].append(ent)
self.nb_rows += 1
self._update_scroll_region()
def add_column(self):
''' Adds one column to the end of the table.
'''
for i in range(self.nb_rows):
grid_row_index = i + 2
ent = self.create_entry_widget(self.frame_with_table)
ent.grid(row=grid_row_index, column=self.nb_cols + 1,
sticky=N+S+E+W)
self.cells[i].append(ent)
self.nb_cols += 1
self._update_scroll_region()
def remove_row(self, row_index):
''' Removes row with a specified index from the table.
If row_index is zero or larger than the total number of rows,
no row is removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row was deleted, False otherwise.
'''
# forbid deleting first row
if self.should_remove_row(row_index):
for j in range(self.nb_cols):
self.before_cell_destroy(self.cells[row_index][j])
self.cells[row_index][j].destroy()
for i in range(row_index + 1, self.nb_rows):
self.cells[i][j].grid_remove()
self.cells[i][j].grid(row=i + 1)
self.cells.remove(self.cells[row_index])
self.nb_rows -= 1
self._update_scroll_region()
return True
return False
def should_remove_row(self, row_index):
''' Checks if row with a specified row index can be removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row_index is >= 1 and < total number of rows,
False otherwise.
'''
return row_index >= 1 and row_index < self.nb_rows
def remove_column(self, column_index):
''' Removes column with a specified index from the table.
If column index is zero or larger than the total number of
columns of the table, no column is removed.
Args:
column_index (int): index of the column to remove.
Returns:
bool: True if column was removed, False otherwise.
'''
# do not allow to delete first column
if column_index > 0 and column_index < self.nb_cols:
for i in range(self.nb_rows):
self.cells[i][column_index].destroy()
for j in range(column_index + 1, self.nb_cols):
self.cells[i][j].grid_remove()
self.cells[i][j].grid(column=j)
self.cells[i].remove(self.cells[i][column_index])
self.nb_cols -= 1
self._update_scroll_region()
return True
return False
def before_cell_destroy(self, cell):
''' This method is called before a table cell is destroyed.
In this class this method does nothing, but can be redefined
in children classes.
Args:
cell (Entry): cell that will be destroyed after call to
this method.
'''
pass
def clear_all_data(self):
''' Clears all data from all cells.
'''
for i in range(self.nb_rows):
for j in range(self.nb_cols):
self.before_cell_clear(self.cells[i][j])
self.cells[i][j].delete(0, END)
def before_cell_clear(self, cell):
''' This method is called before data is cleared from a given cell.
In this class this method does nothing, but can be redefined
in children classes.
Args:
cell (Entry): cell that will be cleared after call
to this method.
'''
pass
def _update_scroll_region(self):
''' Updates scroll region. This method must be called each
time table size or number of columns or rows change.
'''
# ensures that bbox will calculate border correctly
self.frame_with_table.update()
on_canvas_resize(self.canvas)
def read_coefficients(self):
''' Converts data stored as a list to a proper dictionary
necessary for constructing data instance.
'''
return convert_to_dictionary(self.data, self.check_value)
def check_value(self, count):
''' This method is called in read_coefficients method to check what
values must be returned for data instance construction.
In this class it always returns True and can be redefined in
children classes.
'''
return True
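# Editor's note -- minimal usage sketch for TableFrame (hypothetical, not part
# of pyDEA); it assumes a bare Tk root and an empty data container:
#
#     from tkinter import Tk
#     root = Tk()
#     data = []
#     table = TableFrame(root, data, nb_rows=5, nb_cols=3)
#     table.grid(sticky=N+S+E+W)
#     table.add_row()       # grows the grid and refreshes the scroll region
#     root.mainloop()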
class TableFrameWithInputOutputBox(TableFrame):
''' Extends TableFrame with extra functionality necessary for data
modification and choosing input and output categories.
Attributes:
params_frame (ParamsFrame): frame with parameters, this
class communicates
with params_frame when data is loaded or modified.
combobox_text_var (StringVar): StringVar object that stores
categorical category.
panel_text_observer (PanelTextObserver): observer that adds star to
label frame of the parent of this widget.
This class notifies panel_text_observer
when data was modified.
frames (list of Frame): list of frames that hold Checkbuttons for
choosing input and output categories.
row_checkboxes (list of Checkbutton): list of Checkbuttons used
for removing rows.
col_checkboxes (list of Checkbutton): list of Checkbuttons used
for removing columns.
current_categories (list of str): list of current valid categories.
This class might modify this list.
str_var_for_input_output_boxes (StringVar): StringVar object that
is used for communication
with ParamsFrame. If the content of
str_var_for_input_output_boxes was modified,
it means that data was loaded from parameters file
and input and output categories
must be checked depending on parameters file.
data (list of list of str or float): input data, it might
be modified by this class.
Args:
parent (Tk object): parent of this widget.
params_frame (ParamsFrame): frame with parameters, this class
communicates
with params_frame when data is loaded or modified.
combobox_text_var (StringVar): StringVar object that stores
categorical category.
current_categories (list of str): list of current valid categories.
This class might modify this list.
str_var_for_input_output_boxes (StringVar): StringVar object
that is used for communication
with ParamsFrame. If the content of
str_var_for_input_output_boxes was modified,
it means that data was loaded from parameters file and input
and output categories
must be checked depending on parameters file.
if_text_modified_str (StringVar): StringVar object that is used
by PanelTextObserver, its content is modified when data
was modified.
data (list of list of str or float): input data, it might be
modified by this class.
nb_rows (int, optional): number of rows of the table, defaults
to 20.
nb_cols (int, optional): number of columns of the table,
defaults to 5.
'''
def __init__(self, parent, params_frame,
combobox_text_var, current_categories,
str_var_for_input_output_boxes,
if_text_modified_str, data,
nb_rows=20, nb_cols=5):
self.params_frame = params_frame
self.combobox_text_var = combobox_text_var
self.panel_text_observer = PanelTextObserver(if_text_modified_str)
self.frames = []
self.row_checkboxes = []
self.col_checkboxes = []
self.current_categories = current_categories
self.str_var_for_input_output_boxes = str_var_for_input_output_boxes
self.str_var_for_input_output_boxes.trace('w', self.on_load_categories)
super().__init__(parent, data, nb_rows, nb_cols)
def create_widgets(self):
''' Creates widgets of this class.
'''
super().create_widgets()
for column_index in range(self.nb_cols - 1):
self._create_input_output_box(column_index)
for row_index in range(self.nb_rows):
self.add_row_check_box(row_index)
# add observers to add * in the first column
for row_index in range(self.nb_rows):
self.cells[row_index][0].panel_text_observer = self.panel_text_observer
def create_entry_widget(self, parent):
''' Creates SelfValidatingEntry widget.
Args:
parent (Tk object): parent of the SelfValidatingEntry widget.
Returns:
SelfValidatingEntry: created SelfValidatingEntry widget.
'''
return SelfValidatingEntry(parent, self.data, self.cells, width=CELL_WIDTH)
def deselect_all_boxes(self):
''' Deselects all Checkbuttons used for choosing input and
output categories.
'''
for frame in self.frames:
for child in frame.winfo_children():
child.deselect()
def _create_input_output_box(self, column_index):
''' Creates Checkbuttons used for choosing input and output categories.
Args:
column_index (int): index of a column for which
Checkbuttons must be created.
'''
frame_for_btns = Frame(self.frame_with_table)
self.frames.append(frame_for_btns)
input_var = IntVar()
output_var = IntVar()
input_btn = ObserverCheckbutton(
frame_for_btns, input_var, output_var,
self.params_frame.input_categories_frame,
self.params_frame.output_categories_frame,
self.current_categories, self.cells, INPUT_OBSERVER,
self.params_frame.change_category_name,
self.data, self.combobox_text_var,
text='Input', state=DISABLED)
input_btn.grid(row=1, column=0, sticky=N+W)
output_btn = FollowingObserverCheckbutton(
frame_for_btns, output_var, input_var,
self.params_frame.output_categories_frame,
self.params_frame.input_categories_frame,
self.current_categories, self.cells, OUTPUT_OBSERVER,
self.params_frame.change_category_name,
self.data, self.combobox_text_var, input_btn,
text='Output', state=DISABLED)
output_btn.grid(row=2, column=0, sticky=N+W)
self._add_observers(input_btn, output_btn, column_index + 1)
var = IntVar()
column_checkbox = CheckbuttonWithVar(frame_for_btns, var)
column_checkbox.grid(row=0, column=0)
self.col_checkboxes.append((column_checkbox, var))
frame_for_btns.grid(row=1, column=column_index + 2, sticky=N)
def _add_observers(self, input_btn, output_btn, column_index):
''' Adds observers to newly created cells in a given column.
Args:
input_btn (ObserverCheckbutton): observer used to select
input categories.
output_btn (FollowingObserverCheckbutton): observer used
to select output categories.
column_index (int): index of the column to cells of
which observers must be added.
'''
names_modifier = DefaultCategoriesAndDMUModifier(
self.cells, self.current_categories)
for row_index in range(self.nb_rows):
self._add_observers_to_cell(self.cells[row_index][column_index],
names_modifier, input_btn, output_btn)
def _add_observers_to_cell(self, cell, names_modifier, input_btn,
output_btn):
''' Adds given observers to a given cell.
Args:
cell (SelfValidatingEntry): cell where observers must be added.
names_modifier (DefaultCategoriesAndDMUModifier): observer,
for details see DefaultCategoriesAndDMUModifier.
input_btn (ObserverCheckbutton): observer used to select
input categories.
output_btn (FollowingObserverCheckbutton): observer used to
select output categories.
'''
cell.observers.append(names_modifier) # IMPORTANT:
# this observer MUST be added first, it modifies data that
# is used by other observers!
cell.observers.append(input_btn)
cell.observers.append(output_btn)
cell.panel_text_observer = self.panel_text_observer
def on_load_categories(self, *args):
''' Selects input and output categories when data is loaded from
parameters file. Args are provided by the StringVar trace
methods and are ignored in this method.
'''
for frame in self.frames:
for child in frame.winfo_children():
try:
category = child.get_category()
except AttributeError:
pass
else:
if (child.observer_type == INPUT_OBSERVER and
child.get_category() in
self.str_var_for_input_output_boxes.input_categories):
child.select()
if (child.observer_type == OUTPUT_OBSERVER and
child.get_category() in
self.str_var_for_input_output_boxes.output_categories):
child.select()
def add_row_check_box(self, row_index):
''' Adds Checkbutton used for removing rows to a given row.
Args:
row_index (int): index of row to which Checkbutton
must be added.
'''
if row_index >= 1:
var = IntVar()
row_checkbox = Checkbutton(self.frame_with_table, variable=var)
self.row_checkboxes.append((row_checkbox, var))
row_checkbox.grid(row=row_index + 2, column=0)
else:
self.row_checkboxes.append((None, None))
def add_column(self):
''' Adds one column to the end of table.
'''
super().add_column()
self._create_input_output_box(self.nb_cols - 2)
def add_row(self):
''' Adds one row to the end of table.
Note: When data is spread across several pages, addition of
row must also update the display of data.
This functionality is implemented in TableModifierFrame.
'''
super().add_row()
self.add_row_check_box(self.nb_rows - 1)
names_modifier = DefaultCategoriesAndDMUModifier(
self.cells, self.current_categories)
for col in range(1, self.nb_cols):
input_btn, output_btn = self.get_check_boxes(col - 1)
self._add_observers_to_cell(self.cells[self.nb_rows - 1][col],
names_modifier,
input_btn, output_btn)
def get_check_boxes(self, column_index):
''' Gets Checkbuttons used for selecting input and output categories
for a given column.
Args:
column_index (int): index of the column for which Checkbuttons
must be returned.
Returns:
tuple of ObserverCheckbutton, FollowingObserverCheckbutton:
tuple of observers
or None, None if no observers were found.
'''
if column_index < 0 or column_index >= len(self.frames):
return None, None
input_btn = None
output_btn = None
for child in self.frames[column_index].winfo_children():
try:
observer_type = child.observer_type
except AttributeError:
pass
else:
if observer_type == INPUT_OBSERVER:
input_btn = child
elif observer_type == OUTPUT_OBSERVER:
output_btn = child
return input_btn, output_btn
def remove_column(self, column_index):
''' Removes column with a specified index from the table.
If column index is zero or larger than the total number of columns
of the table, no column is removed.
Args:
column_index (int): index of the column to remove.
Returns:
bool: True if column was removed, False otherwise.
'''
# we must record category name before removing column,
# because it will disappear
if column_index < len(self.cells[0]):
category_name = self.cells[0][column_index].get().strip()
else:
category_name = ''
if super().remove_column(column_index):
col = column_index - 1
if category_name:
self.params_frame.input_categories_frame.remove_category(
category_name)
self.params_frame.output_categories_frame.remove_category(
category_name)
if col < len(self.current_categories):
self.current_categories[col] = ''
# remove from data only if category is present
if self.data:
column_with_data_removed = False
for row_index in range(len(self.data)):
if column_index < len(self.data[row_index]):
self.data[row_index].pop(column_index)
column_with_data_removed = True
if column_with_data_removed:
for row in range(1, self.nb_rows):
for j in range(column_index, self.nb_cols):
self.cells[row][j].data_column -= 1
self.panel_text_observer.change_state_if_needed()
self.frames[col].destroy()
for i in range(col + 1, len(self.frames)):
self.frames[i].grid_remove()
self.frames[i].grid(column=i + 1)
self.frames.pop(col)
self.col_checkboxes.pop(col)
return True
return False
def remove_row(self, row_index):
''' Removes data row with a specified index from the table.
Row is not physically removed.
If row_index is zero or larger than the total number of rows,
no row is removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row was deleted, False otherwise.
'''
if self.should_remove_row(row_index):
if self.data:
nb_pages = calculate_nb_pages(len(self.data), self.nb_rows)
data_index = self.get_data_index(row_index)
nb_cols = len(self.cells[row_index])
if data_index != -1 and data_index < len(self.data):
nb_rows_to_change = min(self.nb_rows, len(self.data) + 1)
self.data.pop(data_index)
for row in range(row_index + 1, nb_rows_to_change):
for col in range(0, nb_cols):
if self.cells[row][col].data_row != -1:
self.cells[row][col].data_row -= 1
self.panel_text_observer.change_state_if_needed()
super().remove_row(row_index)
if (nb_pages > 1):
self.add_row()
else:
super().remove_row(row_index)
self.row_checkboxes[row_index][0].destroy()
for i in range(row_index + 1, len(self.row_checkboxes)):
self.row_checkboxes[i][0].grid_remove()
self.row_checkboxes[i][0].grid(row=i + 1)
self.row_checkboxes.pop(row_index)
return True
return False
def get_data_index(self, row_index):
for j in range(0, len(self.cells[row_index])):
if self.cells[row_index][j].data_row != -1:
return self.cells[row_index][j].data_row
return -1
def before_cell_destroy(self, cell):
''' This method is called before a table cell is destroyed.
Notifies observers if data is not empty.
Args:
cell (SelfValidatingEntry): cell that will be destroyed
after call to this method.
'''
info = cell.grid_info()
col = int(info['column'])
row = int(info['row'])
if len(self.data) == 0:
cell.notify_observers(CELL_DESTROY, row, col)
def load_visible_data(self):
''' Displays data in the table. First, it adds more rows to fill
the frame, second, it displays data that fits the table.
'''
self.add_rows_to_fill_visible_frame()
self.display_data()
def display_data(self, start_row=0):
''' Displays data starting from a given data row.
This method is usually called by NavigationForTableFrame when
data spans across
several pages and users clicks on page navigation buttons.
Args:
start_row (int, optional): index of input data starting
from which data should be displayed, defaults to 0.
'''
nb_data_rows = len(self.data)
nb_displayed_rows = 0
for row_index in range(start_row, nb_data_rows):
values = self.data[row_index]
# do not insert data that is not visible
if nb_displayed_rows + 1 >= self.nb_rows:
return
for column_index, coeff in enumerate(values):
# row_index + 1 - first row has categories
self._display_one_cell(nb_displayed_rows, column_index,
coeff, row_index,
column_index, False)
row_index += 1
nb_displayed_rows += 1
if len(self.data) > 0:
nb_cols = len(self.data[0])
else:
nb_cols = self.nb_cols
nb_rows = self.nb_rows - 1 # -1 because we add +1 to row_index
while nb_displayed_rows < nb_rows:
for column_index in range(nb_cols):
self._display_one_cell(nb_displayed_rows, column_index, '',
-1, -1, False)
nb_displayed_rows += 1
    def _display_one_cell(self, row_index, column_index, value_to_display,
data_row, data_col, modify_data=True):
''' Displays data in a cell and sets cell's fields to proper values.
Args:
row_index (int): index of a row where the cell is.
column_index (int): index of a column where the cell is.
                value_to_display (str): new value to display in the cell.
data_row (int): row index of input data.
data_col (int): column index of input data.
modify_data (bool, optional): True if data was modified and
observers
must be notified, False otherwise.
'''
cell_row_index = row_index + 1
self.cells[cell_row_index][column_index].modify_data = modify_data
        self.cells[cell_row_index][column_index].text_value.set(value_to_display)
self.cells[cell_row_index][column_index].data_row = data_row
self.cells[cell_row_index][column_index].data_column = data_col
def add_rows_to_fill_visible_frame(self):
''' Adds rows to table to fill the frame. Usually adds a bit more and
scroll gets activated.
Exact number of added rows depends on operating system, height of
widgets and screen size.
'''
self.canvas.update_idletasks()
frame_height = self.canvas.winfo_height()
while self.canvas.bbox(ALL)[3] <= frame_height - 20:
self.add_row()
self._update_scroll_region()
def check_value(self, count):
''' This method is called in read_coefficients method to check what
values must be returned for data instance construction.
Args:
count (int): data column index.
Returns:
bool: True if the category in the given column index is not
an empty string,
False otherwise.
'''
if self.current_categories[count]:
return True
return False
def clear_all_data(self):
''' Clears all data from all cells and clears input data.
'''
self.data.clear()
super().clear_all_data()
self.current_categories.clear()
# reset modify data back to true
for cell_row in self.cells:
for cell in cell_row:
cell.modify_data = True
def before_cell_clear(self, cell):
''' This method is called before data is cleared from a given cell.
It sets fields of the given cell to initial values.
Args:
cell (SelfValidatingEntry): cell that will be cleared after
call to this method.
'''
cell.modify_data = False
cell.data_row = -1
cell.data_column = -1
class ObserverCheckbutton(Checkbutton):
''' This class implements Checkbutton for choosing input/output categories.
Attributes:
var (IntVar): variable that is set to 1 when Checkbutton is
selected, to 0 otherwise.
opposite_var (IntVar): variable of the other Checkbutton that
            must be deselected if this Checkbutton is selected.
parent (Tk object): frame that holds this Checkbutton.
Warning:
it is important for the parent to be gridded in the
same column
as the entire column of table entries is gridded, because
this class uses parent grid column index to determine
the column where the category name can be read from.
category_frame (CategoriesCheckBox): frame that displays selected
input or output categories.
Note:
if this Checkbutton is used to select input categories,
category_frame must be CategoriesCheckBox object that
displays selected input categories.
if this Checkbutton is used to select output categories,
category_frame must be CategoriesCheckBox object that
displays selected output categories.
opposite_category_frame (CategoriesCheckBox): frame that displays
selected input or output categories. If category_frame
displays input categories, then opposite_category_frame
must display output categories, and vice versa.
current_categories (list of str): list of categories. This class
might modify this list by removing invalid categories and
adding the valid ones.
cells (list of list of SelfValidatingEntry): all entry widgets
collected in list.
data (list of list of str or float): input data.
observer_type (int): describes type of the observer, for possible
values see dea_utils.
change_category_name (callable function): this function is
called when name of a category was changed.
combobox_text_var (StringVar): variable of the combobox used for
selecting categorical category.
Arguments are the same as attributes.
'''
def __init__(self, parent, var, opposite_var, category_frame,
opposite_category_frame,
current_categories, cells,
observer_type, change_category_name, data,
combobox_text_var, *args, **kw):
Checkbutton.__init__(self, parent, variable=var,
command=self._process, *args, **kw)
self.var = var
self.opposite_var = opposite_var
self.parent = parent
self.category_frame = category_frame
self.opposite_category_frame = opposite_category_frame
self.current_categories = current_categories
self.cells = cells
self.data = data
self.observer_type = observer_type
self.change_category_name = change_category_name
self.combobox_text_var = combobox_text_var
def _process(self):
''' This method is called when user clicks on Checkbutton.
Makes sure that the same category can be only input or only
output, but not both, and that selected category cannot also
be selected as a categorical category.
'''
category_name = self.get_category()
if self.var.get() == 1:
self.opposite_var.set(0)
if category_name:
self.category_frame.add_category(category_name)
self.opposite_category_frame.remove_category(category_name)
if category_name == self.combobox_text_var.get():
self.combobox_text_var.set('')
elif category_name:
self.category_frame.remove_category(category_name)
def deselect(self):
''' Deselects Checkbutton.
Note:
method _process() is not called in this case.
'''
self.var.set(0)
def select(self):
''' Selects Checkbutton.
Note:
method _process() is not called in this case.
'''
self.var.set(1)
def change_state_if_needed(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data or categories were modified.
Also modifies current_categories if needed.
This widget becomes disabled if invalid category name value or input
data value were provided by user.
Args:
entry (SelfValidatingEntry): Entry widget whose content was
modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid value,
we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
if entry_state == CHANGE_CATEGORY_NAME:
old_name = ''
internal_col = col - 2
if internal_col < len(self.current_categories):
old_name = self.current_categories[internal_col]
category_name = validate_category_name(
self.cells[0][col - 1].text_value.get().strip(),
internal_col, self.current_categories)
if category_name:
index = len(self.current_categories)
while index <= internal_col:
self.current_categories.append('')
index += 1
self.current_categories[internal_col] = category_name
if old_name:
# change category name in params_frame
self.change_category_name(old_name.strip(), category_name)
self.change_state_based_on_data(entry, entry_state, row, col)
entry.config(foreground='black')
else:
# if category name is empty, disable
self.disable(internal_col, old_name)
entry.config(foreground='red')
else:
self.change_state_based_on_data(entry, entry_state, row, col)
def change_state_based_on_data(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data was modified.
Args:
entry (SelfValidatingEntry): Entry widget whose content
was modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
internal_col = col - 2
# IMPORTANT: read from cells, not from current_categories, they might
# be empty at this stage
category_name = self.cells[0][col - 1].text_value.get().strip()
nb_rows = len(self.data)
if nb_rows == 0:
self.disable(internal_col, category_name)
return
elif len(self.data[0]) == 0:
self.disable(internal_col, category_name)
return
has_one_valid_entry = False
for row_index in range(nb_rows):
# can happen if some values are empty
while col - 1 >= len(self.data[row_index]):
self.data[row_index].append('')
try:
# col - 1 - first column contains DMU names
data_elem = float(self.data[row_index][col - 1])
except ValueError:
state = NOT_VALID_COEFF
else:
state = is_valid_coeff(data_elem)
if state == NOT_VALID_COEFF:
has_one_valid_entry = False
self.disable(internal_col, category_name)
return
elif state == VALID_COEFF or state == WARNING_COEFF:
has_one_valid_entry = True
if has_one_valid_entry:
self.config(state=NORMAL)
if category_name:
if category_name not in self.current_categories:
assert internal_col < len(self.current_categories)
self.current_categories[internal_col] = category_name
if entry_state != CELL_DESTROY and self.var.get() == 1:
self.category_frame.add_category(category_name)
return
def disable(self, internal_col, category_name):
''' Disables Checkbutton.
Args:
internal_col (int): internal column index.
category_name (str): name of category.
'''
self.config(state=DISABLED)
if category_name:
if self.var.get() == 1:
self.category_frame.remove_category(category_name)
if self.opposite_var.get() == 1:
self.opposite_category_frame.remove_category(category_name)
if category_name in self.current_categories:
assert(internal_col < len(self.current_categories))
self.current_categories[internal_col] = ''
if category_name == self.combobox_text_var.get():
self.combobox_text_var.set('')
def get_category(self):
''' Finds category name stored in the corresponding Entry widget
based on where parent of Checkbutton was gridded.
Returns:
str: category name, might be empty string.
'''
info = self.parent.grid_info()
        # conversion to int is necessary for Windows
# for some reason in Windows grid info is stored as str
col = int(info['column'])
return self.cells[0][col - 1].text_value.get().strip()
class FollowingObserverCheckbutton(ObserverCheckbutton):
''' This class follows state of another ObserverCheckbutton that is
used to select input or output categories.
This class is used in order to skip checking if data is valid
second time. The first Checkbutton has already performed this check.
Attributes:
var (IntVar): variable that is set to 1 when Checkbutton
is selected, to 0 otherwise.
opposite_var (IntVar): variable of the other Checkbutton that
            must be deselected if this Checkbutton is selected.
parent (Tk object): frame that holds this Checkbutton.
Warning:
it is important for the parent to be gridded in the
same column as the entire column of table entries
is gridded, because this class uses parent grid column
index to determine the column
where the category name can be read from.
category_frame (CategoriesCheckBox): frame that displays
selected input or output categories.
Note:
if this Checkbutton is used to select input categories,
category_frame must be CategoriesCheckBox object that
displays selected input categories.
if this Checkbutton is used to select output categories,
category_frame
must be CategoriesCheckBox object that displays selected
output categories.
opposite_category_frame (CategoriesCheckBox): frame that displays
selected input or output categories. If category_frame displays
input categories, then opposite_category_frame
must display output categories, and vice versa.
current_categories (list of str): list of categories. This class
might modify this list by removing invalid categories and
adding the valid ones.
cells (list of list of SelfValidatingEntry): all entry widgets
collected in list.
data (list of list of str or float): input data.
observer_type (int): describes type of the observer, for
possible values see dea_utils.
change_category_name (callable function): this function is called
when name of a category was changed.
combobox_text_var (StringVar): variable of the combobox used for
selecting categorical category.
main_box (ObserverCheckbutton): Checkbutton that changes state
first. This Checkbutton changes its state to the same state
as main_box, but does not do extra things
that have been already performed by main_box
(changes to current_categories, for example).
'''
def __init__(self, parent, var, opposite_var, category_frame,
opposite_category_frame,
current_categories, cells,
observer_type, params_frame, data,
combobox_text_var, main_box, *args, **kw):
super().__init__(parent, var, opposite_var, category_frame,
opposite_category_frame, current_categories, cells,
observer_type, params_frame, data,
combobox_text_var, *args, **kw)
self.main_box = main_box
def change_state_if_needed(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data was modified depending on
the state of main_box.
Args:
entry (SelfValidatingEntry): Entry widget whose content
was modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
category_name = self.get_category()
if str(self.main_box.cget('state')) == DISABLED:
self.disable(col - 2, category_name)
else:
self.config(state=NORMAL)
if entry_state != CELL_DESTROY and self.var.get() == 1:
self.category_frame.add_category(category_name)
class DefaultCategoriesAndDMUModifier(object):
''' This class is responsible for adding automatic category and DMU names
if user starts typing data without providing such names first.
Attributes:
cells (list of list of SelfValidatingEntry): list of all Entry
widgets with data.
current_categories (list of str): list of categories.
Args:
cells (list of list of SelfValidatingEntry): list of all Entry
widgets with data.
current_categories (list of str): list of categories.
'''
def __init__(self, cells, current_categories):
self.cells = cells
self.current_categories = current_categories
def change_state_if_needed(self, entry, entry_state, row, col):
''' Writes automatic category and DMU names if they were not
specified before.
Args:
entry (SelfValidatingEntry): Entry widget the content
of which was modified.
entry_state (int): constant that describes entry state,
for details see dea_utils module.
row (int): row index of entry widget. It is the real grid value,
we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
if (entry_state != EMPTY_COEFF and entry_state != CELL_DESTROY and
entry_state != CHANGE_CATEGORY_NAME):
internal_row_index = row - 2
dmu_name = self.cells[internal_row_index][0].text_value.get().strip()
if not dmu_name:
self.cells[internal_row_index][0].text_value.set(
'DMU{0}'.format(internal_row_index))
category_name = self.cells[0][col - 1].text_value.get().strip()
if not category_name:
internal_col_index = col - 2
name = 'Category{0}'.format(internal_col_index)
if internal_col_index >= len(self.current_categories):
index = len(self.current_categories) - 1
while index != internal_col_index:
self.current_categories.append('')
index += 1
# category name MUST be written first, because next line calls
# ObserverCheckbutton
self.cells[0][col - 1].text_value.set(name)
class SelfValidatingEntry(Entry):
    ''' This class implements an Entry widget that knows how to highlight
invalid data. It also notifies other widgets if the content of
Entry changes. Other widgets must implement method
change_state_if_needed().
Such widgets should be appended to the list of listening widgets
called observers.
Attributes:
text_value (StringVar): textvariable of Entry widget that
calls method on_text_changed when the content on Entry changes.
observers (list of objects that implement method change_state_if_needed):
list of widgets or other objects that must be notified if the
content of Entry changes.
data_row (int): row index in data table which should be modified
when the content of Entry changes.
data_column (int): column index in data table which should be
modified when the content of Entry changes.
        data (list of list of str or float): data that will be modified.
modify_data (bool): True if data should be modified, False
otherwise. It is usually set to False when data is uploaded
from file.
panel_text_observer (PanelTextObserver): object that is notified
when data changes.
This object is responsible for adding star to file name when
data was modified.
        all_cells (list of list of SelfValidatingEntry): reference where all cells
are stored.
Warning: all cells must be created before any cell content
can be modified.
Args:
parent (Tk object): parent of this Entry widget.
            data (list of list of str or float): input data that will
be modified.
            all_cells (list of list of SelfValidatingEntry): reference where all cells
are stored.
Warning: all cells must be created before any cell content
can be modified.
'''
def __init__(self, parent, data, all_cells, *args, **kw):
self.text_value = StringVar(master=parent)
self.text_value.trace("w", self.on_text_changed)
super().__init__(parent, *args, **kw)
self.config(textvariable=self.text_value)
self.observers = []
self.all_cells = all_cells
self.data_row = -1
self.data_column = -1
self.data = data
self.modify_data = True
self.panel_text_observer = None
def on_text_changed(self, *args):
''' This method is called each time the content of Entry is modified.
It highlights invalid data, changes data if needed and notifies
other objects when data was changed.
Args are provided by StringVar trace method, but are not used.
'''
info = self.grid_info()
        # physical grid indices
col = int(info['column'])
row = int(info['row'])
self.notify_panel_observer()
if row == 2: # possibly name of category is modified
self.notify_observers(CHANGE_CATEGORY_NAME, row, col)
elif col == 1 and row > 2: # column with DMU names, strings are allowed
self.modify_data_if_needed(row, col)
elif col > 1 and row > 2: # everything left
self.modify_data_if_needed(row, col)
try:
value = float(self.text_value.get().strip())
except ValueError:
self.modify_data = True
self.config(foreground='red')
if len(self.text_value.get().strip()) == 0:
self.notify_observers(EMPTY_COEFF, row, col)
else:
self.notify_observers(NOT_VALID_COEFF, row, col)
return
text_status = is_valid_coeff(value)
if text_status == NOT_VALID_COEFF:
self.config(foreground='red')
elif text_status == WARNING_COEFF:
self.config(foreground='orange')
else:
self.config(foreground='black')
self.notify_observers(text_status, row, col)
self.modify_data = True
def modify_data_if_needed(self, row, col):
''' Modifies data if modify_data is set to True.
Adds empty strings to data when user modifies Entry for which
data_row or/and data_column are equal to -1. Updates data with new
values entered by user.
Args:
row (int): row where Entry is gridded
col (int): column where Entry is gridded
'''
if self.modify_data:
if self.data_row != -1 and self.data_column != -1:
self.data[self.data_row][self.data_column] = self.text_value.get().strip()
else:
row_for_data = len(self.data)
added_rows = False
# -2 because row is physical grid index, not cell index
row_count = len(self.all_cells) - 1
for cells_row in reversed(self.all_cells):
if cells_row[0].data_row != -1:
break
row_count -= 1
if row_count == -1:
row_count = 0
while row_count < row - 2:
self.data.append([])
added_rows = True
row_count += 1
if added_rows:
self.data_row = len(self.data) - 1
else:
assert row_count >= row - 2
self.data_row = len(self.data) - 1 - (row_count - (row - 2))
col_for_data = len(self.data[self.data_row])
added_cols = False
max_nb_col = 0
nb_rows = len(self.data)
for r_ind in range(nb_rows):
row_len = len(self.data[r_ind])
if row_len > max_nb_col:
max_nb_col = row_len
max_nb_col = max(max_nb_col, col)
c_ind = col_for_data
while c_ind < max_nb_col:
self.data[self.data_row].append('')
grid_col = len(self.data[self.data_row])
self.all_cells[row - 2][grid_col - 1].data_row = self.data_row
self.all_cells[row - 2][grid_col - 1].data_column = c_ind
self.notify_observers(EMPTY_COEFF, row, grid_col)
added_cols = True
c_ind += 1
if (col_for_data < col):
col_for_data += 1
if added_cols:
for r_ind in range(nb_rows):
while len(self.data[r_ind]) < max_nb_col:
self.data[r_ind].append('')
grid_col = len(self.data[r_ind])
if r_ind >= self.data_row - (row - 3): # 3 is the first physical
# row with data on the page
grid_row = row - (self.data_row - r_ind)
self.all_cells[grid_row - 2][grid_col - 1].data_row = r_ind
self.all_cells[grid_row - 2][grid_col - 1].data_column = grid_col - 1
self.notify_observers(EMPTY_COEFF, grid_row, grid_col)
self.data_column = col_for_data - 1
else:
self.data_column = col - 1
self.data[self.data_row][self.data_column] = self.text_value.get().strip()
def notify_panel_observer(self):
''' Notifies panel observer that data was modified.
'''
if self.panel_text_observer is not None and self.modify_data is True:
self.panel_text_observer.change_state_if_needed()
def notify_observers(self, entry_state, row, col):
''' Notifies all observers stored in list of observers that data
was modified.
Args:
entry_state (int): state of the Entry widget that describes if
data is valid after modification, for possible values see
dea_utils module.
row (int): row where Entry is gridded.
col (int): column where Entry is gridded.
'''
for observer in self.observers:
observer.change_state_if_needed(self, entry_state, row, col)
class PanelTextObserver(object):
''' This class changes StringVar value that is traced in other classes.
Attributes:
if_text_modified_str (StringVar): StringVar object that
changes value when this observer is notified.
'''
def __init__(self, if_text_modified_str):
self.if_text_modified_str = if_text_modified_str
def change_state_if_needed(self):
''' Changes value of internal StringVar object.
'''
self.if_text_modified_str.set('*')
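# Editor's note -- observer protocol sketch (hypothetical, not part of pyDEA):
# SelfValidatingEntry only requires its observers to expose
# change_state_if_needed(entry, entry_state, row, col), so a minimal logging
# observer could look like this and be appended to a cell's observers list:
#
#     class LoggingObserver(object):
#         def change_state_if_needed(self, entry, entry_state, row, col):
#             print('cell at grid ({0}, {1}) changed, state {2}'.format(
#                 row, col, entry_state))
#
#     # cell.observers.append(LoggingObserver())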
class CheckbuttonWithVar(Checkbutton):
''' Custom Checkbutton widget that provides deselect method.
Attributes:
var (IntVar): 0 if not selected, 1 otherwise.
Args:
parent (Tk object): parent of this widget.
var (IntVar): variable that controls if Checkbutton is selected.
'''
def __init__(self, parent, var, *args, **kw):
super().__init__(parent, variable=var, *args, **kw)
self.var = var
def deselect(self):
''' Deselects Checkbutton.
'''
self.var.set(0)
| 43.186706
| 101
| 0.586473
|
0d1cb2bd13372c70e8d90b7d4a0a7f5ca93d8028
| 1,487
|
py
|
Python
|
haipproxy-0.1/utils/redis_util.py
|
sampleCJ/haipproxy
|
656f7b1ee884ff20f6b58662b7a80e92c9c7f881
|
[
"MIT"
] | 1
|
2018-03-20T09:14:33.000Z
|
2018-03-20T09:14:33.000Z
|
haipproxy-0.1/utils/redis_util.py
|
sampleCJ/haipproxy
|
656f7b1ee884ff20f6b58662b7a80e92c9c7f881
|
[
"MIT"
] | null | null | null |
haipproxy-0.1/utils/redis_util.py
|
sampleCJ/haipproxy
|
656f7b1ee884ff20f6b58662b7a80e92c9c7f881
|
[
"MIT"
] | 1
|
2018-09-08T08:06:54.000Z
|
2018-09-08T08:06:54.000Z
|
import uuid
import time
import redis
from config.settings import (
REDIS_HOST, REDIS_PORT, DEFAULT_REDIS_DB,
REDIS_PASSWORD, LOCKER_PREFIX)
def get_redis_conn(**kwargs):
host = kwargs.get('host', REDIS_HOST)
port = kwargs.get('port', REDIS_PORT)
db = kwargs.get('db', DEFAULT_REDIS_DB)
password = kwargs.get('password', REDIS_PASSWORD)
return redis.StrictRedis(host, port, db, password)
def acquire_lock(conn, lock_name, acquire_timeout=10, lock_timeout=10):
"""inspired by book 'redis in action' """
identifier = str(uuid.uuid4())
lock_name = LOCKER_PREFIX + lock_name
end = time.time() + acquire_timeout
while time.time() < end:
if conn.set(lock_name, identifier, lock_timeout, nx=True):
return identifier
elif not conn.ttl(lock_name) or conn.ttl(lock_name) == -1:
conn.expire(lock_name, lock_timeout)
time.sleep(0.1)
return False
def release_lock(conn, lock_name, identifier):
pipe = conn.pipeline(True)
lock_name = LOCKER_PREFIX + lock_name
while True:
try:
pipe.watch(lock_name)
identifier_origin = pipe.get(lock_name).decode()
if identifier_origin == identifier:
pipe.multi()
pipe.delete(lock_name)
pipe.execute()
return True
pipe.unwatch()
break
except redis.exceptions.WatchError:
pass
return False
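# Editor's note -- usage sketch (not part of the original module); 'my-task' is
# just an example lock name:
#
#     conn = get_redis_conn()
#     token = acquire_lock(conn, 'my-task', acquire_timeout=5, lock_timeout=10)
#     if token:
#         try:
#             pass  # do the work guarded by the lock
#         finally:
#             release_lock(conn, 'my-task', token)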
| 27.537037
| 71
| 0.631473
|
2d556724ace48fefdd785ad63151add648429fc7
| 6,264
|
py
|
Python
|
projects/ibsr/config.py
|
vuhoangminh/medical-segmentation
|
4a2a663d1f2d6de5c78bc521f6ed2aa1681a8804
|
[
"MIT"
] | 1
|
2018-12-06T09:17:26.000Z
|
2018-12-06T09:17:26.000Z
|
projects/ibsr/config.py
|
vuhoangminh/medical-segmentation
|
4a2a663d1f2d6de5c78bc521f6ed2aa1681a8804
|
[
"MIT"
] | null | null | null |
projects/ibsr/config.py
|
vuhoangminh/medical-segmentation
|
4a2a663d1f2d6de5c78bc521f6ed2aa1681a8804
|
[
"MIT"
] | 2
|
2019-05-07T10:07:33.000Z
|
2019-05-20T12:50:37.000Z
|
config = dict()
config["env"] = "SERVER" # change this to "FULL" if you want to run full
# config["mode"] = "TEST" # change this to "FULL" if you want to run full
config["mode"] = "FULL" # change this to "FULL" if you want to run full
config["data_folders"] = ["data_train", "data_valid"]
# change this if you want to only use some of the modalities
config["all_modalities"] = ["t1"]
config["training_modalities"] = config["all_modalities"]
config["nb_channels"] = len(config["training_modalities"])
config["truth_old"] = ["seg"]
config["truth"] = ["truth"]
config["groundtruth_modalities"] = config["truth_old"] + config["truth"]
config["mask"] = ["mask"]
if config["mode"] == "TEST":
config["dataset"] = ["test"]
else:
config["dataset"] = ["original", "preprocessed",
"denoised_original", "denoised_preprocessed",
"test"]
config["dataset_minh_normalize"] = ["original_minh_normalize", "preprocessed_minh_normalize",
"denoised_original_minh_normalize", "denoised_preprocessed_minh_normalize",
"test_minh_normalize"]
config["original_folder"] = ["original_bak"]
config["project_name"] = "3DUnetCNN_BRATS"
config["brats_folder"] = "projects/ibsr"
config["dataset_folder"] = "projects/ibsr/database"
config["template_data_folder"] = "database/data_train"
config["template_folder"] = "IBSR_01"
# config_unet["image_shape"] = (240, 240, 155) # This determines what shape the images will be cropped/resampled to.
# This determines what shape the images will be cropped/resampled to.
# config["image_shape"] = (160, 192, 128)
config["image_shape"] = (144,144,144)
# config["is_create_patch_index_list_original"] = False
config["labels"] = (1, 2, 3) # the label numbers on the input image
# config["labels"] = (0, 1, 2, 4) # the label numbers on the input image
config["n_labels"] = len(config["labels"])
# configs of u-net
config_unet = dict()
# pool size for the max pooling operations
config_unet["pool_size"] = (2, 2, 2)
# switch to None to train on the whole image
config_unet["patch_shape"] = (128, 128, 128)
if "patch_shape" in config_unet and config_unet["patch_shape"] is not None:
config_unet["input_shape"] = tuple(
[config["nb_channels"]] + list(config_unet["patch_shape"]))
else:
config_unet["input_shape"] = tuple(
[config["nb_channels"]] + list(config_unet["image_shape"]))
config_unet["truth_channel"] = config["nb_channels"]
# if False, will use upsampling instead of deconvolution
config_unet["deconvolution"] = True
config_unet["depth"] = 4
config_unet["n_base_filters"] = 16
config_unet["batch_size"] = 1
config_unet["validation_batch_size"] = 2
config_unet["n_epochs"] = 200 # cutoff the training after this many epochs
# learning rate will be reduced after this many epochs if the validation loss is not improving
# config_unet["patience"] = 10
config_unet["patience"] = 20
# training will be stopped after this many epochs without the validation loss improving
config_unet["early_stop"] = 50
config_unet["initial_learning_rate"] = 1e-4 # factor by which the learning rate will be reduced
config_unet["learning_rate_drop"] = 0.2 # portion of the data that will be used for training
# config_unet["learning_rate_epochs"] = 1
config_unet["validation_split"] = 0.8 # if > 0, during training, validation patches will be overlapping
config_unet["validation_patch_overlap"] = 0 # randomly offset the first patch index by up to this offset
config_unet["training_patch_start_offset"] = None
# if False, extract patches only in bouding box of mask
config_unet["is_create_patch_index_list_original"] = True
config["augment_flipud"] = False
# config["augment_fliplr"] = True
config["augment_fliplr"] = False
# config["augment_elastic"] = True
config["augment_elastic"] = False
config["augment_rotation"] = False
# config["augment_rotation"] = True
config["augment_shift"] = False
config["augment_shear"] = False
config["augment_zoom"] = False
config["n_augment"] = 0
config["flip"] = False # augments the data by randomly flipping an axis during
# data shape must be a cube. Augments the data by permuting in various directions
config["permute"] = True
config["distort"] = None # switch to None if you want no distortion
config["augment"] = config["flip"] or config["distort"]
# if True, then patches without any target will be skipped
config["skip_blank"] = True
# Dictionary
config_dict = dict()
config_dict["challenge"] = ["brats"]
config_dict["year"] = [2018, 2019]
config_dict["model"] = ["unet", "isensee", "densefcn", "denseunet", "resunet", "seunet", "seisensee", "simple", "eye", "m", "m2", "multi"]
config_dict["model_depth"] = ["unet", "seunet", "multi", "denseunet", "resunet"]
# "deepmedic", "maskrcnn", "cascaded", "proposed"]
config_dict["depth_unet"] = [3, 4, 5, 6] # depth of unet
config_dict["n_base_filters_unet"] = [4, 8, 16, 32] # number of base filters of unet
config_dict["image_shape"] = ["160-192-128", "144-144-144", "240-240-155"]
config_dict["patch_shape"] = ["16-16-16", "32-32-32", "64-64-64", "128-128-128", "160-192-128", "160-192-1", "160-192-7"]
config_dict["is_bias_correction"] = ["0","1"]
config_dict["is_denoise"] = ["0", "bm4d", "gaussian", "median"]
config_dict["is_normalize"] = ["z", "01"]
config_dict["is_crf"] = ["0", "post", "cnn", "rnn"]
config_dict["crop"] = ["0", "1"]
config_dict["hist_match"] = ["0", "1"]
config_dict["loss"] = ["weighted", "minh", "tversky", "tv_minh"]
config_convert_name = {
"original": "bias-0_denoise-0",
"preprocessed": "bias-1_denoise-0",
"denoised_original": "bias-0_denoise-bm4d",
"denoised_preprocessed": "bias-1_denoise-bm4d",
}
config_finetune = dict()
config_finetune["n_epochs"] = 100 # cutoff the training after this many epochs
# learning rate will be reduced after this many epochs if the validation loss is not improving
config_finetune["patience"] = 5
# training will be stopped after this many epochs without the validation loss improving
config_finetune["early_stop"] = 16
config_finetune["initial_learning_rate"] = 4e-5 # factor by which the learning rate will be reduced
config_finetune["learning_rate_drop"] = 0.2 # portion of the data that will be used for training
| 46.058824
| 138
| 0.706258
|
1256290643e8c03c0652e705e96b3a0ff7dfb61b
| 741
|
py
|
Python
|
data-mining/cluster-analysis/assignment/clustering-data/python/main_k_means.py
|
4979/courses
|
dd9efa0a6b60cead833f36a6bfa518dd4fece17f
|
[
"Apache-2.0"
] | null | null | null |
data-mining/cluster-analysis/assignment/clustering-data/python/main_k_means.py
|
4979/courses
|
dd9efa0a6b60cead833f36a6bfa518dd4fece17f
|
[
"Apache-2.0"
] | null | null | null |
data-mining/cluster-analysis/assignment/clustering-data/python/main_k_means.py
|
4979/courses
|
dd9efa0a6b60cead833f36a6bfa518dd4fece17f
|
[
"Apache-2.0"
] | null | null | null |
import sys
from LoadData import *
from k_means import *
from evaluation import *
if __name__ == "__main__":
if len(sys.argv) != 3:
print "[usage] <data-file> <ground-truth-file>"
exit(1)
dataFilename = sys.argv[1]
groundtruthFilename = sys.argv[2]
data = loadPoints(dataFilename)
groundtruth = loadClusters(groundtruthFilename)
nDim = len(data[0])
K = 2 # Suppose there are 2 clusters
centers = []
centers.append(data[0])
centers.append(data[1])
results = kmeans(data, centers)
res_Purity = purity(groundtruth, results)
res_NMI = NMI(groundtruth, results)
print "Purity =", res_Purity
print "NMI = ", res_NMI
| 21.171429
| 55
| 0.608637
|
1b4981087626856baeb84bb50ff45392b851c655
| 473
|
py
|
Python
|
caluculating_error.py
|
araj29011998/refjdnsdf
|
7dadbc393b061e2b1e671c386572aa469dd80770
|
[
"MIT"
] | null | null | null |
caluculating_error.py
|
araj29011998/refjdnsdf
|
7dadbc393b061e2b1e671c386572aa469dd80770
|
[
"MIT"
] | null | null | null |
caluculating_error.py
|
araj29011998/refjdnsdf
|
7dadbc393b061e2b1e671c386572aa469dd80770
|
[
"MIT"
] | null | null | null |
import pandas as pd
from math import sqrt
from sklearn.metrics import mean_squared_error
#predicted and expected values obtained by implementing ARIMA model code on data set of each user
res=pd.read_csv('predicted_expected_values_for_all_users.csv')
#userid=[x for x in res['userid']]
predicted=[x for x in res['predicted']]
expected=[x for x in res['expected']]
#caluculating RMSE value
error = sqrt(mean_squared_error(predicted, expected))
print(error)
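# For reference, the metric printed above is the root-mean-square error:
#   RMSE = sqrt( (1/n) * sum_i (predicted_i - expected_i)^2 )
# i.e. the square root of sklearn's mean_squared_error over the two CSV columns.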
| 31.533333
| 98
| 0.767442
|
17f7cda687dd4e78d7c2979b3a5d49cff66f5202
| 1,340
|
py
|
Python
|
cleanplots.py
|
markessien/meplotter
|
393af7058530f82c7a37cd13d357021fe5ea87fd
|
[
"MIT"
] | null | null | null |
cleanplots.py
|
markessien/meplotter
|
393af7058530f82c7a37cd13d357021fe5ea87fd
|
[
"MIT"
] | null | null | null |
cleanplots.py
|
markessien/meplotter
|
393af7058530f82c7a37cd13d357021fe5ea87fd
|
[
"MIT"
] | null | null | null |
debug_file = "/home/mark/.chia/mainnet/log/debug.log"
file = open(debug_file, "r")
def get_file_name(str):
plotpos = str.find(".plot")
return str[:plotpos+5]
files_not_found = []
files_no_pk = []
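# Summary of the error codes assigned in the loop below (derived from its checks):
#   err == 1 -> a plot file could not be opened ("Failed to open file ...")
#   err == 2 -> a quality lookup was timed ("Looking up qualities on ... took: ...")
#   err == 3 -> a plot has a farmer public key that is not recognised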
for line in file:
err = -1
pos = line.find(" Failed to open file ")
if pos > -1:
err = 1
file_name = get_file_name(line[pos+21:])
pos = line.find("Looking up qualities on ")
if pos > -1:
err = 2
file_name = get_file_name(line[pos+24:])
time_pos = line.find(file_name + " took:") + len(file_name + " took:") + 1
time_required = float(line[time_pos:time_pos+4])
pos1 = line.find("WARNING Plot ")
pos2 = line.find("has a farmer public key that is not")
if pos1 > -1 and pos2 > -1:
err = 3
file_name = get_file_name(line[pos1+14:])
if err == 1:
print("File not found on " + file_name)
files_not_found.append(file_name)
if err == 2:
print("Slow lookup on " + file_name + " Time: " + str(time_required) + "s")
if err == 3:
print("PK not found for " + file_name)
files_no_pk.append(file_name)
print("Files not found")
for file_name in files_not_found:
print("rm " + file_name)
print("\n\nFiles without pk")
for file_name in files_no_pk:
print("rm " + file_name)
| 24.363636
| 83
| 0.59403
|
ad7b0e1de089fb7456efc881bbb88bbb29e1e2d8
| 2,862
|
py
|
Python
|
src/setup.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 21
|
2015-08-02T21:26:14.000Z
|
2019-12-27T09:57:44.000Z
|
src/setup.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 34
|
2015-01-12T00:38:14.000Z
|
2020-08-31T11:19:37.000Z
|
src/setup.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 15
|
2015-03-24T15:42:30.000Z
|
2020-09-24T20:26:42.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import py2exe
import shutil
from glob import glob
import os
from logger import logger
logging = logger.getChild("Installer")
name = 'TheQube'
__version__ = '0.9'
__author__ = 'Andre Polykanine also known as Menelion Elensúlë'
def get_datafiles():
return [("", ["main.defaults"] + glob('*.exe') + glob("*.dll"))
] + list_all_documentation() + list_session_defaults() + accessible_output_data() + sound_lib_data() + certifi_data() + get_soundpacks() + get_locales()
def accessible_output_data():
import accessible_output2
return accessible_output2.find_datafiles()
def sound_lib_data():
import sound_lib
return sound_lib.find_datafiles()
def certifi_data():
import certifi
path = os.path.join(certifi.__path__[0], '*.pem')
results = glob(path)
dest_dir = os.path.join('certifi')
return [(dest_dir, results)]
def list_session_defaults():
files = glob('session/*/*.defaults') + glob('core/sessions/*/*.defaults')
answer = []
for i in files:
answer.append((os.path.split(i)[0], [i]))
return answer
def get_soundpacks():
answer = []
depth = 6
for root, dirs, files in os.walk('sounds'):
if depth == 0:
break
new = (root, glob('%s/*.wav' % root))
answer.append(new)
depth -= 1
return answer
def get_locales():
answer = []
for root, dirs, files in os.walk('locale'):
new = (root, glob(os.path.join(root, '*.mo')))
answer.append(new)
return answer
def list_all_documentation ():
answer = []
depth = 6
for root, dirs, files in os.walk('../Documentation'):
if depth == 0:
break
readme = (root[3:], [os.path.join(root, 'readme.html')])
answer.append(readme)
changelog = (root[3:], [os.path.join(root, 'changelog.html')])
answer.append(changelog)
depth -= 1
return answer
if __name__ == '__main__':
setup(
name = name,
author = __author__,
author_email = "theqube@groups.io",
version = __version__,
url = 'http://theqube.oire.org/',
packages = find_packages(),
data_files = get_datafiles(),
options = {
'py2exe': {
'packages': ['packaging', 'appdirs'],
'compressed': False,
'dll_excludes': ['w9xpopen.exe', 'MSVCP90.dll', 'mswsock.dll', 'powrprof.dll', 'MPR.dll', 'MSVCR100.dll', 'mfc90.dll', 'MSVFW32.dll', 'AVIFIL32.dll', 'AVICAP32.dll', 'ADVAPI32.dll', 'CRYPT32.dll', 'WLDAP32.dll'],
'optimize': 1,
'skip_archive': True,
'excludes': ["win32ui", "pywin.dialogs", "pywin.debugger.dbgcon", "tkinter", "tk", "Tkconstants", "Tkinter", "tcl", "_imagingtk", "PIL._imagingtk", "ImageTk", "PIL.ImageTk", "FixTk", "django", "gobject", "gtk", "unittest", "remote", "ZODB"],
}
},
windows = [
{
'script': 'main.pyw',
'dest_base': 'TheQube',
}
],
install_requires = [
]
)
| 27.519231
| 246
| 0.63522
|
5b37d85fafcc76f3684ecdfea45db41b28935524
| 8,593
|
py
|
Python
|
TH/forecast_models/LGBM.py
|
lorenzodonadio/TimeHierarchy
|
a84693d8f701cd88904313610d863add8b79769f
|
[
"MIT"
] | 1
|
2021-06-19T16:58:31.000Z
|
2021-06-19T16:58:31.000Z
|
TH/forecast_models/LGBM.py
|
lorenzodonadio/TimeHierarchy
|
a84693d8f701cd88904313610d863add8b79769f
|
[
"MIT"
] | null | null | null |
TH/forecast_models/LGBM.py
|
lorenzodonadio/TimeHierarchy
|
a84693d8f701cd88904313610d863add8b79769f
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import warnings
from tbats import TBATS
from tqdm import tqdm
from .BaseFuncs import BaseFuncs
from ..utils.regressors import RegLGBM
from lightgbm import LGBMRegressor
import pdb
class ForecastLGBM(BaseFuncs):
def __init__(self,
col_name,
n_nodes,
hierarchy = None,
hierarchy_short = None,
use_short = True,
look_back = 96,
features = None):
        '''Set up a LightGBM-based forecaster for `col_name` over the given hierarchy
        (either `hierarchy` or `hierarchy_short`), using `look_back` lags and optional
        exogenous `features`.'''
super().__init__()
if use_short and hierarchy_short is None:
raise ValueError('Must provide hierarchy_short')
if not use_short and hierarchy is None:
raise ValueError('Must provide hierarchy')
self.col_name = col_name
self.n_nodes = n_nodes
self.use_short = use_short
self.hierarchy = hierarchy
self.hierarchy_short = hierarchy_short
self.look_back = look_back
self.features = features
def prepare_y(self,df,horizon = 1):
y = []
for i in range(0,df.shape[0]-(self.look_back + horizon)):
tmp_y = df[self.col_name][i+self.look_back : i+self.look_back+horizon].values.reshape(1,-1).ravel()
y.append(tmp_y)
return np.array(y)
def prepare_x(self, df,horizon = 1,features = None):
df = self.df if df is None else df
x = []
for i in range(0,df.shape[0]-(self.look_back + horizon)):
tmp_x = df[self.col_name][i:i+self.look_back].values.reshape(1,-1).ravel()
if features is None:
x.append(tmp_x)
elif type(features) is type(list()):
                ex = df[features].iloc[i+self.look_back + horizon].values  # exogenous features
x.append(np.hstack([tmp_x,ex]))
else:
raise ValueError('features must be a list of column names contained in df')
#pdb.set_trace()
return np.array(x)
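    # Shape note (derived from the two methods above): with look_back L and horizon h,
    # prepare_x yields rows of length L (plus len(features) exogenous columns when a
    # feature list is given) and prepare_y yields rows of length h.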
def fit(self,eval_metric = 'l2',
sample_weight = None,
disable_tqdm = True,
**kwargs):
self.model_tree = {}
if self.use_short:
for k in tqdm(self.hierarchy_short, disable = disable_tqdm): # train the models
tmp_dict = {}
for k2 in self.hierarchy_short[k]:
X = self.prepare_x(self.hierarchy_short[k][k2],features = self.features)
y = self.prepare_y(self.hierarchy_short[k][k2])
tmp_dict[k2] = LGBMRegressor(**kwargs).fit(X,
y.ravel(),
eval_metric = eval_metric,
sample_weight = sample_weight)
self.model_tree[k] = tmp_dict
# create in sample prediction for the reconciliation errors
for k2 in self.hierarchy_short[k]:
ys = [] # ys = y sample ->in sample predict
series = self.hierarchy_short[k][k2][self.col_name]
for i in range(len(series)-self.look_back-1):
tmp = series[i:i+self.look_back] #endogenous features
if self.features is not None:
ex = self.hierarchy_short[k][k2][self.features].iloc[i+self.look_back+1].values
ex = ex if np.size(ex) else None # exogenous features
else:
ex = None
ys.append(self._one_step_ahead(tmp,self.model_tree[k][k2],ex = ex)[0])
fitted_values = self.look_back*[ys[0]] + ys
ld = len(self.hierarchy_short[k][k2]) - len(fitted_values)
# add extra data for point where we could not forecast due to exogenous regressors
self.model_tree[k][k2].fittedvalues = pd.Series(np.array(fitted_values+ld*[np.mean(ys)]))
else:
for k in tqdm(self.hierarchy, disable = disable_tqdm):
horizon = self.n_nodes[k]
ys = [] # ys = y sample ->in sample predict
#fit the model
X = self.prepare_x(self.hierarchy[k],horizon = horizon,features = self.features)
y = self.prepare_y(self.hierarchy[k],horizon = horizon)
self.model_tree[k] = RegLGBM(X,y,
eval_metric = eval_metric,
sample_weight = sample_weight,
**kwargs)
# create in sample prediction for the reconciliation errors
series = self.hierarchy[k][self.col_name]
node = self.n_nodes[k]
for i in range(int(np.ceil((len(series)-self.look_back)/node))-horizon):
if self.features is not None:
ex = self.hierarchy[k][self.features].iloc[i+self.look_back+horizon].values
                        ex = ex if np.size(ex) else None  # exogenous features
else:
ex = None
tmp = series[i*node:i*node+self.look_back] #endogenous features
ys.extend(self._one_step_ahead(tmp,self.model_tree[k],ex = ex))
# make the lengths consistent, the redundant data will be removed after
fitted_values = self.look_back*[ys[0]] + ys
ld = len(self.hierarchy_short[k][0]) - len(fitted_values)
# add extra data for point where we could not forecast due to exogenous regressors
fitted_values = np.array(fitted_values+ld*[np.mean(ys)])
self.model_tree[k].fittedvalues = pd.Series(fitted_values[:len(series)])
# compute in sample errors for reconcilitation
self._compute_errors(to_remove = self.look_back)
if self.error_df.isna().any().any():
self.error_df = self.error_df.fillna(method='ffill')
def _one_step_ahead(self,series,lgbm_model,ex=None):
if ex is None:
return lgbm_model.predict(np.array(series[-self.look_back:].values).reshape(1, -1))
else:
x_in = np.hstack([np.array(series[-self.look_back:].values),ex]).reshape(1, -1)
return lgbm_model.predict(x_in)
def forecast(self,h,ex=None):
        '''Forecast `h` steps ahead for every node, optionally using exogenous data `ex`.'''
self.forecast_tree = {}
if self.use_short:
for k in self.model_tree:
tmp_dict = {}
for k2 in self.model_tree[k]:
###DO YOUR THING WITH LGBM
series = self.hierarchy_short[k][k2][self.col_name].copy()
for i in range(h):
if ex is None:
y_pred = self._one_step_ahead(series,self.model_tree[k][k2])[0]
else:
#pdb.set_trace()
y_pred = self._one_step_ahead(series,self.model_tree[k][k2],ex = ex[i,:])[0] # forecasted value
series = series.append(pd.Series(data = y_pred,index = [series.index.argmax() + 1]))
tmp_dict[k2] = series[-h:]
self.forecast_tree[k] = tmp_dict
else:
for k,node in zip(self.model_tree,self.n_nodes):
mdl = self.model_tree[k]
series = self.hierarchy[k][self.col_name]
for i in range(h):
if ex is None:
series = series.append(pd.Series(self._one_step_ahead(series,mdl))).reset_index(drop=True)
else:
series = series.append(pd.Series(self._one_step_ahead(series,mdl,ex=ex[i,:]))).reset_index(drop=True)
tmp = series[-int(h*self.n_nodes[k]):] # retrieve the forecasted values
tmp_dict = {}
for k2 in range(int(node)):
tmp_dict[k2] = tmp[k2::int(node)]
self.forecast_tree[k] = tmp_dict
self.yhat = self.compute_y_hat(self.forecast_tree)
| 49.102857
| 125
| 0.509368
|
1b75b21e78d9d4278036883c5d97923a2f0fdaee
| 1,825
|
py
|
Python
|
benchmarks/Evolution/both/evo_tests/fetch_json_tests/html_test_case.py
|
nuprl/retic_performance
|
621211c2f40251ce5364c33e72e4067e34a32013
|
[
"MIT"
] | 3
|
2018-08-03T02:41:29.000Z
|
2021-03-19T03:18:47.000Z
|
benchmarks/Evolution/both/evo_tests/fetch_json_tests/html_test_case.py
|
nuprl/retic_performance
|
621211c2f40251ce5364c33e72e4067e34a32013
|
[
"MIT"
] | 3
|
2018-02-04T17:53:56.000Z
|
2018-11-10T17:06:57.000Z
|
benchmarks/Evolution/both/evo_tests/fetch_json_tests/html_test_case.py
|
nuprl/retic_performance
|
621211c2f40251ce5364c33e72e4067e34a32013
|
[
"MIT"
] | 1
|
2018-08-04T00:14:12.000Z
|
2018-08-04T00:14:12.000Z
|
__author__ = 'Edwin Cowart, Kevin McDonough'
class JSONTestCase:
""" A JSON Test Case
"""
def __init__(self, name=None, in_json=None, out_json=None):
""" Construct a JSON Test Case
:param name: The name of the JSON Test Cases
:type name: String or None
:param in_json: The in_json data
:type in_json: String or None
:param out_json: The out_json data
:type out_json: String or None
"""
self.name = name
self.in_json = in_json
self.out_json = out_json
def has_name(self):
""" Does this Test Case have a name?
:return: True if this Test Case has a name, False otherwise
:rtype: Boolean
"""
return self.name is not None
def has_in_json(self):
""" Does this Test Case have a in_json?
:return: True if this Test Case has a in_json, False otherwise
:rtype: Boolean
"""
return self.in_json is not None
def has_out_json(self):
""" Does this Test Case have a out_json?
:return: True if this Test Case has a out_json, False otherwise
:rtype: Boolean
"""
return self.out_json is not None
def is_complete(self):
""" Is this Test Case complete?
:return: True if this Test Case complete, False otherwise
:rtype: Boolean
"""
return self.has_name() and self.has_in_json() and self.has_out_json()
def get_in_json_filename(self):
""" Get the full in filename
:return: The in filename
:rtype: String
"""
return self.name + "-in.json"
def get_out_json_filename(self):
""" Get the full out filename
:return: The out filename
:rtype: String
"""
return self.name + "-out.json"
| 30.416667
| 77
| 0.591781
|
b451124b3406d1253972c4c38d5de6ee59f88c18
| 45,032
|
py
|
Python
|
statsmodels/tsa/statespace/representation.py
|
o-P-o/statsmodels
|
ee9f5c0bd7ee7f646bdbaf31fbc295e5a0ab02f8
|
[
"BSD-3-Clause"
] | 1
|
2020-06-18T07:38:11.000Z
|
2020-06-18T07:38:11.000Z
|
statsmodels/tsa/statespace/representation.py
|
o-P-o/statsmodels
|
ee9f5c0bd7ee7f646bdbaf31fbc295e5a0ab02f8
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/tsa/statespace/representation.py
|
o-P-o/statsmodels
|
ee9f5c0bd7ee7f646bdbaf31fbc295e5a0ab02f8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
State Space Representation
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
from .tools import (
find_best_blas_type, validate_matrix_shape, validate_vector_shape
)
from .initialization import Initialization
from . import tools
class OptionWrapper(object):
def __init__(self, mask_attribute, mask_value):
# Name of the class-level bitmask attribute
self.mask_attribute = mask_attribute
# Value of this option
self.mask_value = mask_value
def __get__(self, obj, objtype):
# Return True / False based on whether the bit is set in the bitmask
return bool(getattr(obj, self.mask_attribute, 0) & self.mask_value)
def __set__(self, obj, value):
mask_attribute_value = getattr(obj, self.mask_attribute, 0)
if bool(value):
value = mask_attribute_value | self.mask_value
else:
value = mask_attribute_value & ~self.mask_value
setattr(obj, self.mask_attribute, value)
class MatrixWrapper(object):
def __init__(self, name, attribute):
self.name = name
self.attribute = attribute
self._attribute = '_' + attribute
def __get__(self, obj, objtype):
matrix = getattr(obj, self._attribute, None)
# # Remove last dimension if the array is not actually time-varying
# if matrix is not None and matrix.shape[-1] == 1:
# return np.squeeze(matrix, -1)
return matrix
def __set__(self, obj, value):
value = np.asarray(value, order="F")
shape = obj.shapes[self.attribute]
if len(shape) == 3:
value = self._set_matrix(obj, value, shape)
else:
value = self._set_vector(obj, value, shape)
setattr(obj, self._attribute, value)
obj.shapes[self.attribute] = value.shape
def _set_matrix(self, obj, value, shape):
# Expand 1-dimensional array if possible
if (value.ndim == 1 and shape[0] == 1 and
value.shape[0] == shape[1]):
value = value[None, :]
# Enforce that the matrix is appropriate size
validate_matrix_shape(
self.name, value.shape, shape[0], shape[1], obj.nobs
)
# Expand time-invariant matrix
if value.ndim == 2:
value = np.array(value[:, :, None], order="F")
return value
def _set_vector(self, obj, value, shape):
# Enforce that the vector has appropriate length
validate_vector_shape(
self.name, value.shape, shape[0], obj.nobs
)
# Expand the time-invariant vector
if value.ndim == 1:
value = np.array(value[:, None], order="F")
return value
class Representation(object):
r"""
State space representation of a time series process
Parameters
----------
k_endog : {array_like, int}
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the measurement equation. Must be less than
or equal to `k_states`. Default is `k_states`.
initial_variance : float, optional
Initial variance used when approximate diffuse initialization is
specified. Default is 1e6.
initialization : Initialization object or str, optional
Initialization method for the initial state. If a string, must be one
of {'diffuse', 'approximate_diffuse', 'stationary', 'known'}.
initial_state : array_like, optional
If `initialization='known'` is used, the mean of the initial state's
distribution.
initial_state_cov : array_like, optional
If `initialization='known'` is used, the covariance matrix of the
initial state's distribution.
nobs : int, optional
If an endogenous vector is not given (i.e. `k_endog` is an integer),
the number of observations can optionally be specified. If not
specified, they will be set to zero until data is bound to the model.
dtype : np.dtype, optional
If an endogenous vector is not given (i.e. `k_endog` is an integer),
the default datatype of the state space matrices can optionally be
specified. Default is `np.float64`.
design : array_like, optional
The design matrix, :math:`Z`. Default is set to zeros.
obs_intercept : array_like, optional
The intercept for the observation equation, :math:`d`. Default is set
to zeros.
obs_cov : array_like, optional
The covariance matrix for the observation equation :math:`H`. Default
is set to zeros.
transition : array_like, optional
The transition matrix, :math:`T`. Default is set to zeros.
state_intercept : array_like, optional
The intercept for the transition equation, :math:`c`. Default is set to
zeros.
selection : array_like, optional
The selection matrix, :math:`R`. Default is set to zeros.
state_cov : array_like, optional
The covariance matrix for the state equation :math:`Q`. Default is set
to zeros.
**kwargs
Additional keyword arguments. Not used directly. It is present to
improve compatibility with subclasses, so that they can use `**kwargs`
to specify any default state space matrices (e.g. `design`) without
having to clean out any other keyword arguments they might have been
passed.
Attributes
----------
nobs : int
The number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive
definite covariance matrix describing
the shocks in the measurement equation.
shapes : dictionary of name:tuple
A dictionary recording the initial shapes
of each of the representation matrices as
tuples.
initialization : str
Kalman filter initialization method. Default is unset.
initial_variance : float
Initial variance for approximate diffuse
initialization. Default is 1e6.
Notes
-----
A general state space model is of the form
.. math::
y_t & = Z_t \alpha_t + d_t + \varepsilon_t \\
\alpha_t & = T_t \alpha_{t-1} + c_t + R_t \eta_t \\
where :math:`y_t` refers to the observation vector at time :math:`t`,
:math:`\alpha_t` refers to the (unobserved) state vector at time
:math:`t`, and where the irregular components are defined as
.. math::
\varepsilon_t \sim N(0, H_t) \\
\eta_t \sim N(0, Q_t) \\
The remaining variables (:math:`Z_t, d_t, H_t, T_t, c_t, R_t, Q_t`) in the
equations are matrices describing the process. Their variable names and
dimensions are as follows
Z : `design` :math:`(k\_endog \times k\_states \times nobs)`
d : `obs_intercept` :math:`(k\_endog \times nobs)`
H : `obs_cov` :math:`(k\_endog \times k\_endog \times nobs)`
T : `transition` :math:`(k\_states \times k\_states \times nobs)`
c : `state_intercept` :math:`(k\_states \times nobs)`
R : `selection` :math:`(k\_states \times k\_posdef \times nobs)`
Q : `state_cov` :math:`(k\_posdef \times k\_posdef \times nobs)`
In the case that one of the matrices is time-invariant (so that, for
example, :math:`Z_t = Z_{t+1} ~ \forall ~ t`), its last dimension may
be of size :math:`1` rather than size `nobs`.
References
----------
.. [*] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
endog = None
r"""
(array) The observation vector, alias for `obs`.
"""
design = MatrixWrapper('design', 'design')
r"""
(array) Design matrix: :math:`Z~(k\_endog \times k\_states \times nobs)`
"""
obs_intercept = MatrixWrapper('observation intercept', 'obs_intercept')
r"""
(array) Observation intercept: :math:`d~(k\_endog \times nobs)`
"""
obs_cov = MatrixWrapper('observation covariance matrix', 'obs_cov')
r"""
(array) Observation covariance matrix:
:math:`H~(k\_endog \times k\_endog \times nobs)`
"""
transition = MatrixWrapper('transition', 'transition')
r"""
(array) Transition matrix:
:math:`T~(k\_states \times k\_states \times nobs)`
"""
state_intercept = MatrixWrapper('state intercept', 'state_intercept')
r"""
(array) State intercept: :math:`c~(k\_states \times nobs)`
"""
selection = MatrixWrapper('selection', 'selection')
r"""
(array) Selection matrix:
:math:`R~(k\_states \times k\_posdef \times nobs)`
"""
state_cov = MatrixWrapper('state covariance matrix', 'state_cov')
r"""
(array) State covariance matrix:
:math:`Q~(k\_posdef \times k\_posdef \times nobs)`
"""
def __init__(self, k_endog, k_states, k_posdef=None,
initial_variance=1e6, nobs=0, dtype=np.float64,
design=None, obs_intercept=None, obs_cov=None,
transition=None, state_intercept=None, selection=None,
state_cov=None, statespace_classes=None, **kwargs):
self.shapes = {}
# Check if k_endog is actually the endog array
endog = None
if isinstance(k_endog, np.ndarray):
endog = k_endog
# If so, assume that it is either column-ordered and in wide format
# or row-ordered and in long format
if (endog.flags['C_CONTIGUOUS'] and
(endog.shape[0] > 1 or nobs == 1)):
endog = endog.T
k_endog = endog.shape[0]
# Endogenous array, dimensions, dtype
self.k_endog = k_endog
if k_endog < 1:
raise ValueError('Number of endogenous variables in statespace'
' model must be a positive number.')
self.nobs = nobs
# Get dimensions from transition equation
if k_states < 1:
raise ValueError('Number of states in statespace model must be a'
' positive number.')
self.k_states = k_states
self.k_posdef = k_posdef if k_posdef is not None else k_states
# Make sure k_posdef <= k_states
# TODO: we could technically allow k_posdef > k_states, but the Cython
# code needs to be more thoroughly checked to avoid seg faults.
if self.k_posdef > self.k_states:
raise ValueError('Dimension of state innovation `k_posdef` cannot'
' be larger than the dimension of the state.')
# Bind endog, if it was given
if endog is not None:
self.bind(endog)
# Record the shapes of all of our matrices
# Note: these are time-invariant shapes; in practice the last dimension
# may also be `self.nobs` for any or all of these.
self.shapes = {
'obs': (self.k_endog, self.nobs),
'design': (self.k_endog, self.k_states, 1),
'obs_intercept': (self.k_endog, 1),
'obs_cov': (self.k_endog, self.k_endog, 1),
'transition': (self.k_states, self.k_states, 1),
'state_intercept': (self.k_states, 1),
'selection': (self.k_states, self.k_posdef, 1),
'state_cov': (self.k_posdef, self.k_posdef, 1),
}
# Representation matrices
# These matrices are only used in the Python object as containers,
# which will be copied to the appropriate _statespace object if a
# filter is called.
scope = locals()
for name, shape in self.shapes.items():
if name == 'obs':
continue
# Create the initial storage array for each matrix
setattr(self, '_' + name, np.zeros(shape, dtype=dtype, order="F"))
# If we were given an initial value for the matrix, set it
# (notice it is being set via the descriptor)
if scope[name] is not None:
setattr(self, name, scope[name])
# Options
self.initial_variance = initial_variance
self.prefix_statespace_map = (statespace_classes
if statespace_classes is not None
else tools.prefix_statespace_map.copy())
# State-space initialization data
self.initialization = kwargs.get('initialization', None)
basic_inits = ['diffuse', 'approximate_diffuse', 'stationary']
if self.initialization in basic_inits:
self.initialize(self.initialization)
elif self.initialization == 'known':
if 'constant' in kwargs:
constant = kwargs['constant']
elif 'initial_state' in kwargs:
# TODO deprecation warning
constant = kwargs['initial_state']
else:
raise ValueError('Initial state must be provided when "known"'
' is the specified initialization method.')
if 'stationary_cov' in kwargs:
stationary_cov = kwargs['stationary_cov']
elif 'initial_state_cov' in kwargs:
# TODO deprecation warning
stationary_cov = kwargs['initial_state_cov']
else:
raise ValueError('Initial state covariance matrix must be'
' provided when "known" is the specified'
' initialization method.')
self.initialize('known', constant=constant,
stationary_cov=stationary_cov)
elif (not isinstance(self.initialization, Initialization) and
self.initialization is not None):
raise ValueError("Invalid state space initialization method.")
# Matrix representations storage
self._representations = {}
# Setup the underlying statespace object storage
self._statespaces = {}
# Caches
self._time_invariant = None
def __getitem__(self, key):
_type = type(key)
# If only a string is given then we must be getting an entire matrix
if _type is str:
if key not in self.shapes:
raise IndexError('"%s" is an invalid state space matrix name'
% key)
matrix = getattr(self, '_' + key)
# See note on time-varying arrays, below
if matrix.shape[-1] == 1:
return matrix[(slice(None),)*(matrix.ndim-1) + (0,)]
else:
return matrix
# Otherwise if we have a tuple, we want a slice of a matrix
elif _type is tuple:
name, slice_ = key[0], key[1:]
if name not in self.shapes:
raise IndexError('"%s" is an invalid state space matrix name'
% name)
matrix = getattr(self, '_' + name)
# Since the model can support time-varying arrays, but often we
# will instead have time-invariant arrays, we want to allow setting
# a matrix slice like mod['transition',0,:] even though technically
# it should be mod['transition',0,:,0]. Thus if the array in
# question is time-invariant but the last slice was excluded,
# add it in as a zero.
if matrix.shape[-1] == 1 and len(slice_) <= matrix.ndim-1:
slice_ = slice_ + (0,)
return matrix[slice_]
# Otherwise, we have only a single slice index, but it is not a string
else:
raise IndexError('First index must the name of a valid state space'
' matrix.')
def __setitem__(self, key, value):
_type = type(key)
# If only a string is given then we must be setting an entire matrix
if _type is str:
if key not in self.shapes:
raise IndexError('"%s" is an invalid state space matrix name'
% key)
setattr(self, key, value)
# If it's a tuple (with a string as the first element) then we must be
# setting a slice of a matrix
elif _type is tuple:
name, slice_ = key[0], key[1:]
if name not in self.shapes:
raise IndexError('"%s" is an invalid state space matrix name'
% key[0])
# Change the dtype of the corresponding matrix
dtype = np.array(value).dtype
matrix = getattr(self, '_' + name)
valid_types = ['f', 'd', 'F', 'D']
if not matrix.dtype == dtype and dtype.char in valid_types:
matrix = getattr(self, '_' + name).real.astype(dtype)
# Since the model can support time-varying arrays, but often we
# will instead have time-invariant arrays, we want to allow setting
# a matrix slice like mod['transition',0,:] even though technically
# it should be mod['transition',0,:,0]. Thus if the array in
# question is time-invariant but the last slice was excluded,
# add it in as a zero.
if matrix.shape[-1] == 1 and len(slice_) == matrix.ndim-1:
slice_ = slice_ + (0,)
# Set the new value
matrix[slice_] = value
setattr(self, name, matrix)
# Otherwise we got a single non-string key, (e.g. mod[:]), which is
# invalid
else:
raise IndexError('First index must the name of a valid state space'
' matrix.')
def _clone_kwargs(self, endog, **kwargs):
"""
Construct keyword arguments for cloning a state space model
Parameters
----------
endog : array_like
An observed time-series process :math:`y`.
**kwargs
Keyword arguments to pass to the new state space representation
model constructor. Those that are not specified are copied from
the specification of the current state space model.
"""
# We always need the base dimensions, but they cannot change from
# the base model when cloning (the idea is: if these need to change,
# need to make a new instance manually, since it's not really cloning).
kwargs['nobs'] = len(endog)
kwargs['k_endog'] = self.k_endog
for key in ['k_states', 'k_posdef']:
val = getattr(self, key)
if key not in kwargs or kwargs[key] is None:
kwargs[key] = val
if kwargs[key] != val:
raise ValueError('Cannot change the dimension of %s when'
' cloning.' % key)
# Get defaults for time-invariant system matrices, if not otherwise
# provided
# Time-varying matrices must be replaced.
for name in self.shapes.keys():
if name == 'obs':
continue
if name not in kwargs:
mat = getattr(self, name)
if mat.shape[-1] != 1:
raise ValueError('The `%s` matrix is time-varying. Cloning'
' this model requires specifying an'
' updated matrix.' % name)
kwargs[name] = mat
# Default is to use the same initialization
kwargs.setdefault('initialization', self.initialization)
return kwargs
def clone(self, endog, **kwargs):
"""
Clone a state space representation while overriding some elements
Parameters
----------
endog : array_like
An observed time-series process :math:`y`.
**kwargs
Keyword arguments to pass to the new state space representation
model constructor. Those that are not specified are copied from
the specification of the current state space model.
Returns
-------
Representation
Notes
-----
If some system matrices are time-varying, then new time-varying
matrices *must* be provided.
"""
kwargs = self._clone_kwargs(endog, **kwargs)
mod = self.__class__(**kwargs)
mod.bind(endog)
return mod
def extend(self, endog, start=None, end=None, **kwargs):
"""
Extend the current state space model, or a specific (time) subset
Parameters
----------
endog : array_like
An observed time-series process :math:`y`.
start : int, optional
The first period of a time-varying state space model to include in
the new model. Has no effect if the state space model is
time-invariant. Default is the initial period.
end : int, optional
The last period of a time-varying state space model to include in
the new model. Has no effect if the state space model is
time-invariant. Default is the final period.
**kwargs
Keyword arguments to pass to the new state space representation
model constructor. Those that are not specified are copied from
the specification of the current state space model.
Returns
-------
Representation
Notes
-----
This method does not allow replacing a time-varying system matrix with
a time-invariant one (or vice-versa). If that is required, use `clone`.
"""
endog = np.atleast_1d(endog)
if endog.ndim == 1:
endog = endog[:, np.newaxis]
nobs = len(endog)
if start is None:
start = 0
if end is None:
end = self.nobs
if start < 0:
start = self.nobs + start
if end < 0:
end = self.nobs + end
if start > self.nobs:
raise ValueError('The `start` argument of the extension within the'
' base model cannot be after the end of the'
' base model.')
if end > self.nobs:
raise ValueError('The `end` argument of the extension within the'
' base model cannot be after the end of the'
' base model.')
if start > end:
raise ValueError('The `start` argument of the extension within the'
' base model cannot be after the `end` argument.')
# Note: if start == end or if end < self.nobs, then we're just cloning
# (no extension)
endog = tools.concat([self.endog[:, start:end].T, endog])
# Extend any time-varying arrays
error_ti = ('Model has time-invariant %s matrix, so cannot provide'
' an extended matrix.')
error_tv = ('Model has time-varying %s matrix, so an updated'
' time-varying matrix for the extension period'
' is required.')
for name, shape in self.shapes.items():
if name == 'obs':
continue
mat = getattr(self, name)
# If we were *not* given an extended value for this matrix...
if name not in kwargs:
# If this is a time-varying matrix in the existing model
if mat.shape[-1] > 1:
# If we have an extension period, then raise an error
# because we should have been given an extended value
if end + nobs > self.nobs:
raise ValueError(error_tv % name)
# If we do not have an extension period, then set the new
# time-varying matrix to be the portion of the existing
# time-varying matrix that corresponds to the period of
# interest
else:
kwargs[name] = mat[..., start:end + nobs]
elif nobs == 0:
raise ValueError('Extension is being performed within-sample'
' so cannot provide an extended matrix')
# If we were given an extended value for this matrix
else:
# TODO: Need to add a check for ndim, and if the matrix has
# one fewer dimensions than the existing matrix, add a new axis
# If this is a time-invariant matrix in the existing model,
# raise an error
if mat.shape[-1] == 1 and self.nobs > 1:
raise ValueError(error_ti % name)
# Otherwise, validate the shape of the given extended value
# Note: we do not validate the number of observations here
# (so we pass in updated_mat.shape[-1] as the nobs argument
# in the validate_* calls); instead, we check below that we
# at least `nobs` values were passed in and then only take the
# first of them as required. This can be useful when e.g. the
# end user knows the extension values up to some maximum
# endpoint, but does not know what the calling methods may
# specifically require.
updated_mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, updated_mat.shape, shape[0],
updated_mat.shape[-1])
else:
validate_matrix_shape(name, updated_mat.shape, shape[0],
shape[1], updated_mat.shape[-1])
if updated_mat.shape[-1] < nobs:
raise ValueError(error_tv % name)
else:
updated_mat = updated_mat[..., :nobs]
# Concatenate to get the new time-varying matrix
kwargs[name] = np.c_[mat[..., start:end], updated_mat]
return self.clone(endog, **kwargs)
def diff_endog(self, new_endog):
# TODO: move this function to tools?
endog = self.endog.T
if len(new_endog) < len(endog):
raise ValueError('Given data (length %d) is too short to diff'
' against model data (length %d).'
% (len(new_endog), len(endog)))
if len(new_endog) > len(endog):
nobs_append = len(new_endog) - len(endog)
endog = np.c_[endog.T, new_endog[-nobs_append:].T * np.nan].T
new_nan = np.isnan(new_endog)
existing_nan = np.isnan(endog)
if np.any(new_nan & ~existing_nan):
raise ValueError('New data cannot have missing values for'
' observations that are non-missing in model'
' data.')
is_revision = ~existing_nan & ~(new_endog == endog)
revision_ix = list(zip(*np.where(is_revision)))
is_new = existing_nan & ~new_nan
new_ix = list(zip(*np.where(is_new)))
return revision_ix, new_ix
@property
def prefix(self):
"""
(str) BLAS prefix of currently active representation matrices
"""
arrays = (
self._design, self._obs_intercept, self._obs_cov,
self._transition, self._state_intercept, self._selection,
self._state_cov
)
if self.endog is not None:
arrays = (self.endog,) + arrays
return find_best_blas_type(arrays)[0]
@property
def dtype(self):
"""
(dtype) Datatype of currently active representation matrices
"""
return tools.prefix_dtype_map[self.prefix]
@property
def time_invariant(self):
"""
(bool) Whether or not currently active representation matrices are
time-invariant
"""
if self._time_invariant is None:
return (
self._design.shape[2] == self._obs_intercept.shape[1] ==
self._obs_cov.shape[2] == self._transition.shape[2] ==
self._state_intercept.shape[1] == self._selection.shape[2] ==
self._state_cov.shape[2]
)
else:
return self._time_invariant
@property
def _statespace(self):
prefix = self.prefix
if prefix in self._statespaces:
return self._statespaces[prefix]
return None
@property
def obs(self):
r"""
(array) Observation vector: :math:`y~(k\_endog \times nobs)`
"""
return self.endog
def bind(self, endog):
"""
Bind data to the statespace representation
Parameters
----------
endog : ndarray
Endogenous data to bind to the model. Must be column-ordered
ndarray with shape (`k_endog`, `nobs`) or row-ordered ndarray with
shape (`nobs`, `k_endog`).
Notes
-----
The strict requirements arise because the underlying statespace and
Kalman filtering classes require Fortran-ordered arrays in the wide
format (shaped (`k_endog`, `nobs`)), and this structure is setup to
prevent copying arrays in memory.
By default, numpy arrays are row (C)-ordered and most time series are
represented in the long format (with time on the 0-th axis). In this
case, no copying or re-ordering needs to be performed, instead the
array can simply be transposed to get it in the right order and shape.
Although this class (Representation) has stringent `bind` requirements,
it is assumed that it will rarely be used directly.
"""
if not isinstance(endog, np.ndarray):
raise ValueError("Invalid endogenous array; must be an ndarray.")
# Make sure we have a 2-dimensional array
# Note: reshaping a 1-dim array into a 2-dim array by changing the
# shape tuple always results in a row (C)-ordered array, so it
# must be shaped (nobs, k_endog)
if endog.ndim == 1:
# In the case of nobs x 0 arrays
if self.k_endog == 1:
endog.shape = (endog.shape[0], 1)
# In the case of k_endog x 0 arrays
else:
endog.shape = (1, endog.shape[0])
if not endog.ndim == 2:
raise ValueError('Invalid endogenous array provided; must be'
' 2-dimensional.')
# Check for valid column-ordered arrays
if endog.flags['F_CONTIGUOUS'] and endog.shape[0] == self.k_endog:
pass
# Check for valid row-ordered arrays, and transpose them to be the
# correct column-ordered array
elif endog.flags['C_CONTIGUOUS'] and endog.shape[1] == self.k_endog:
endog = endog.T
# Invalid column-ordered arrays
elif endog.flags['F_CONTIGUOUS']:
raise ValueError('Invalid endogenous array; column-ordered'
' arrays must have first axis shape of'
' `k_endog`.')
# Invalid row-ordered arrays
elif endog.flags['C_CONTIGUOUS']:
raise ValueError('Invalid endogenous array; row-ordered'
' arrays must have last axis shape of'
' `k_endog`.')
# Non-contiguous arrays
else:
raise ValueError('Invalid endogenous array; must be ordered in'
' contiguous memory.')
# In some corner cases (e.g. np.array(1., ndmin=2) with numpy < 1.8)
# we may still have a non-fortran contiguous array, so double-check
# that now
if not endog.flags['F_CONTIGUOUS']:
endog = np.asfortranarray(endog)
# Set a flag for complex data
self._complex_endog = np.iscomplexobj(endog)
# Set the data
self.endog = endog
self.nobs = self.endog.shape[1]
# Reset shapes
if hasattr(self, 'shapes'):
self.shapes['obs'] = self.endog.shape
def initialize(self, initialization, approximate_diffuse_variance=None,
constant=None, stationary_cov=None):
"""Create an Initialization object if necessary"""
if initialization == 'known':
initialization = Initialization(self.k_states, 'known',
constant=constant,
stationary_cov=stationary_cov)
elif initialization == 'approximate_diffuse':
if approximate_diffuse_variance is None:
approximate_diffuse_variance = self.initial_variance
initialization = Initialization(
self.k_states, 'approximate_diffuse',
approximate_diffuse_variance=approximate_diffuse_variance)
elif initialization == 'stationary':
initialization = Initialization(self.k_states, 'stationary')
elif initialization == 'diffuse':
initialization = Initialization(self.k_states, 'diffuse')
# We must have an initialization object at this point
if not isinstance(initialization, Initialization):
raise ValueError("Invalid state space initialization method.")
self.initialization = initialization
def initialize_known(self, constant, stationary_cov):
"""
Initialize the statespace model with known distribution for initial
state.
These values are assumed to be known with certainty or else
filled with parameters during, for example, maximum likelihood
estimation.
Parameters
----------
constant : array_like
Known mean of the initial state vector.
stationary_cov : array_like
Known covariance matrix of the initial state vector.
"""
constant = np.asarray(constant, order="F")
stationary_cov = np.asarray(stationary_cov, order="F")
if not constant.shape == (self.k_states,):
raise ValueError('Invalid dimensions for constant state vector.'
' Requires shape (%d,), got %s' %
(self.k_states, str(constant.shape)))
if not stationary_cov.shape == (self.k_states, self.k_states):
raise ValueError('Invalid dimensions for stationary covariance'
' matrix. Requires shape (%d,%d), got %s' %
(self.k_states, self.k_states,
str(stationary_cov.shape)))
self.initialize('known', constant=constant,
stationary_cov=stationary_cov)
def initialize_approximate_diffuse(self, variance=None):
"""
Initialize the statespace model with approximate diffuse values.
Rather than following the exact diffuse treatment (which is developed
for the case that the variance becomes infinitely large), this assigns
an arbitrary large number for the variance.
Parameters
----------
variance : float, optional
The variance for approximating diffuse initial conditions. Default
is 1e6.
"""
if variance is None:
variance = self.initial_variance
self.initialize('approximate_diffuse',
approximate_diffuse_variance=variance)
def initialize_stationary(self):
"""
Initialize the statespace model as stationary.
"""
self.initialize('stationary')
def initialize_diffuse(self):
"""
Initialize the statespace model as stationary.
"""
self.initialize('diffuse')
def _initialize_representation(self, prefix=None):
if prefix is None:
prefix = self.prefix
dtype = tools.prefix_dtype_map[prefix]
# If the dtype-specific representation matrices do not exist, create
# them
if prefix not in self._representations:
# Copy the statespace representation matrices
self._representations[prefix] = {}
for matrix in self.shapes.keys():
if matrix == 'obs':
self._representations[prefix][matrix] = (
self.obs.astype(dtype)
)
else:
# Note: this always makes a copy
self._representations[prefix][matrix] = (
getattr(self, '_' + matrix).astype(dtype)
)
# If they do exist, update them
else:
for matrix in self.shapes.keys():
existing = self._representations[prefix][matrix]
if matrix == 'obs':
# existing[:] = self.obs.astype(dtype)
pass
else:
new = getattr(self, '_' + matrix).astype(dtype)
if existing.shape == new.shape:
existing[:] = new[:]
else:
self._representations[prefix][matrix] = new
# Determine if we need to (re-)create the _statespace models
# (if time-varying matrices changed)
if prefix in self._statespaces:
ss = self._statespaces[prefix]
create = (
not ss.obs.shape[1] == self.endog.shape[1] or
not ss.design.shape[2] == self.design.shape[2] or
not ss.obs_intercept.shape[1] == self.obs_intercept.shape[1] or
not ss.obs_cov.shape[2] == self.obs_cov.shape[2] or
not ss.transition.shape[2] == self.transition.shape[2] or
not (ss.state_intercept.shape[1] ==
self.state_intercept.shape[1]) or
not ss.selection.shape[2] == self.selection.shape[2] or
not ss.state_cov.shape[2] == self.state_cov.shape[2]
)
else:
create = True
# (re-)create if necessary
if create:
if prefix in self._statespaces:
del self._statespaces[prefix]
# Setup the base statespace object
cls = self.prefix_statespace_map[prefix]
self._statespaces[prefix] = cls(
self._representations[prefix]['obs'],
self._representations[prefix]['design'],
self._representations[prefix]['obs_intercept'],
self._representations[prefix]['obs_cov'],
self._representations[prefix]['transition'],
self._representations[prefix]['state_intercept'],
self._representations[prefix]['selection'],
self._representations[prefix]['state_cov']
)
return prefix, dtype, create
def _initialize_state(self, prefix=None, complex_step=False):
# TODO once the transition to using the Initialization objects is
# complete, this should be moved entirely to the _{{prefix}}Statespace
# object.
if prefix is None:
prefix = self.prefix
# (Re-)initialize the statespace model
if isinstance(self.initialization, Initialization):
if not self.initialization.initialized:
raise RuntimeError('Initialization is incomplete.')
self._statespaces[prefix].initialize(self.initialization,
complex_step=complex_step)
else:
raise RuntimeError('Statespace model not initialized.')
class FrozenRepresentation(object):
"""
Frozen Statespace Model
Takes a snapshot of a Statespace model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
measurement equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name:tuple
A dictionary recording the shapes of each of
the representation matrices as tuples.
endog : ndarray
The observation vector.
design : ndarray
The design matrix, :math:`Z`.
obs_intercept : ndarray
The intercept for the observation equation, :math:`d`.
obs_cov : ndarray
The covariance matrix for the observation equation :math:`H`.
transition : ndarray
The transition matrix, :math:`T`.
state_intercept : ndarray
The intercept for the transition equation, :math:`c`.
selection : ndarray
The selection matrix, :math:`R`.
state_cov : ndarray
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : Initialization object
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
    initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
"""
_model_attributes = [
'model', 'prefix', 'dtype', 'nobs', 'k_endog', 'k_states',
'k_posdef', 'time_invariant', 'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov', 'missing', 'nmissing', 'shapes', 'initialization',
'initial_state', 'initial_state_cov', 'initial_variance'
]
_attributes = _model_attributes
def __init__(self, model):
# Initialize all attributes to None
for name in self._attributes:
setattr(self, name, None)
# Update the representation attributes
self.update_representation(model)
def update_representation(self, model):
"""Update model Representation"""
# Model
self.model = model
# Data type
self.prefix = model.prefix
self.dtype = model.dtype
# Copy the model dimensions
self.nobs = model.nobs
self.k_endog = model.k_endog
self.k_states = model.k_states
self.k_posdef = model.k_posdef
self.time_invariant = model.time_invariant
# Save the state space representation at the time
self.endog = model.endog
self.design = model._design.copy()
self.obs_intercept = model._obs_intercept.copy()
self.obs_cov = model._obs_cov.copy()
self.transition = model._transition.copy()
self.state_intercept = model._state_intercept.copy()
self.selection = model._selection.copy()
self.state_cov = model._state_cov.copy()
self.missing = np.array(model._statespaces[self.prefix].missing,
copy=True)
self.nmissing = np.array(model._statespaces[self.prefix].nmissing,
copy=True)
# Save the final shapes of the matrices
self.shapes = dict(model.shapes)
for name in self.shapes.keys():
if name == 'obs':
continue
self.shapes[name] = getattr(self, name).shape
self.shapes['obs'] = self.endog.shape
# Save the state space initialization
self.initialization = model.initialization
if model.initialization is not None:
model._initialize_state()
self.initial_state = np.array(
model._statespaces[self.prefix].initial_state, copy=True)
self.initial_state_cov = np.array(
model._statespaces[self.prefix].initial_state_cov, copy=True)
self.initial_diffuse_state_cov = np.array(
model._statespaces[self.prefix].initial_diffuse_state_cov,
copy=True)
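# A minimal usage sketch of the Representation container defined above
# (illustrative only; the numbers are made up, a local-level-style model):
#   import numpy as np
#   mod = Representation(k_endog=1, k_states=1)
#   mod.bind(np.random.randn(100, 1))    # row-ordered (nobs, k_endog) data
#   mod['design', 0, 0] = 1.0            # y_t = alpha_t + eps_t
#   mod['transition', 0, 0] = 1.0        # alpha_t = alpha_{t-1} + eta_t
#   mod['selection', 0, 0] = 1.0
#   mod['obs_cov', 0, 0] = 0.5
#   mod['state_cov', 0, 0] = 0.1
#   mod.initialize_approximate_diffuse()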
| 40.064057
| 79
| 0.583874
|
0c39d2419a0ee61981a3a71ea61a1e0a0a9ccd9e
| 12,952
|
py
|
Python
|
edward/inferences.py
|
twiecki/edward
|
1ac2eeb7f5163915848afd3b027c714255459de3
|
[
"Apache-2.0"
] | 4
|
2016-05-09T18:48:21.000Z
|
2018-03-01T22:50:42.000Z
|
edward/inferences.py
|
twiecki/edward
|
1ac2eeb7f5163915848afd3b027c714255459de3
|
[
"Apache-2.0"
] | null | null | null |
edward/inferences.py
|
twiecki/edward
|
1ac2eeb7f5163915848afd3b027c714255459de3
|
[
"Apache-2.0"
] | 3
|
2016-07-05T14:19:08.000Z
|
2019-09-04T13:48:59.000Z
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
from edward.data import Data
from edward.util import kl_multivariate_normal, log_sum_exp
from edward.variationals import PointMass
try:
import prettytensor as pt
except ImportError:
pass
class Inference:
"""
Base class for inference methods.
Arguments
----------
model: Model
probability model p(x, z)
data: Data, optional
data x
"""
def __init__(self, model, data=Data()):
self.model = model
self.data = data
class MonteCarlo(Inference):
"""
Base class for Monte Carlo methods.
Arguments
----------
model: Model
probability model p(x, z)
data: Data, optional
data x
"""
def __init__(self, *args, **kwargs):
Inference.__init__(self, *args, **kwargs)
class VariationalInference(Inference):
"""
Base class for variational inference methods.
Arguments
----------
model: Model
probability model p(x, z)
variational: Variational
variational model q(z; lambda)
data: Data, optional
data x
"""
def __init__(self, model, variational, data=Data()):
Inference.__init__(self, model, data)
self.variational = variational
def run(self, *args, **kwargs):
"""
A simple wrapper to run the inference algorithm.
"""
sess = self.initialize(*args, **kwargs)
for t in range(self.n_iter):
loss = self.update(sess)
self.print_progress(t, loss, sess)
return sess
def initialize(self, n_iter=1000, n_data=None, n_print=100):
"""
Initialize inference algorithm.
Arguments
----------
n_iter: int, optional
Number of iterations for optimization.
n_data: int, optional
Number of samples for data subsampling. Default is to use all
the data.
n_print: int, optional
Number of iterations for each print progress.
"""
self.n_iter = n_iter
self.n_data = n_data
self.n_print = n_print
self.losses = tf.constant(0.0)
loss = self.build_loss()
# Use ADAM with a decaying scale factor
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(starter_learning_rate,
global_step,
100, 0.9, staircase=True)
self.train = tf.train.AdamOptimizer(learning_rate).minimize(
loss, global_step=global_step)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
return sess
def update(self, sess):
_, loss = sess.run([self.train, self.losses])
return loss
def print_progress(self, t, losses, sess):
if t % self.n_print == 0:
print("iter %d loss %.2f " % (t, np.mean(losses)))
self.variational.print_params(sess)
def build_loss(self):
raise NotImplementedError()
class MFVI(VariationalInference):
# TODO this isn't MFVI so much as VI where q is analytic
"""
Mean-field variational inference
(Ranganath et al., 2014)
"""
def __init__(self, *args, **kwargs):
VariationalInference.__init__(self, *args, **kwargs)
def initialize(self, n_minibatch=1, score=None, *args, **kwargs):
# TODO if score=True, make Normal do sess.run()
"""
Parameters
----------
n_minibatch: int, optional
Number of samples from variational model for calculating
stochastic gradients.
score: bool, optional
Whether to force inference to use the score function
gradient estimator. Otherwise default is to use the
reparameterization gradient if available.
"""
if score is None and self.variational.is_reparam:
self.score = False
else:
self.score = True
self.n_minibatch = n_minibatch
self.samples = tf.placeholder(shape=(self.n_minibatch, self.variational.num_vars),
dtype=tf.float32,
name='samples')
return VariationalInference.initialize(self, *args, **kwargs)
def update(self, sess):
if self.score:
# TODO the mapping should go here before sampling.
# In principle the mapping should go here but we don't
# want to have to run this twice. Also I've noticed that it
# is significantly slower if I have it here for some reason,
# so I'm leaving this as an open problem.
#x = self.data.sample(self.n_data)
#self.variational.set_params(self.variational.mapping(x))
samples = self.variational.sample(self.samples.get_shape(), sess)
else:
samples = self.variational.sample_noise(self.samples.get_shape())
_, loss = sess.run([self.train, self.losses], {self.samples: samples})
return loss
def build_loss(self):
if self.score and hasattr(self.variational, 'entropy'):
return self.build_score_loss_entropy()
elif self.score:
return self.build_score_loss()
elif not self.score and hasattr(self.variational, 'entropy'):
return self.build_reparam_loss_entropy()
else:
return self.build_reparam_loss()
def build_score_loss(self):
"""
Loss function to minimize, whose gradient is a stochastic
gradient based on the score function estimator.
(Paisley et al., 2012)
"""
# ELBO = E_{q(z; lambda)} [ log p(x, z) - log q(z; lambda) ]
x = self.data.sample(self.n_data)
self.variational.set_params(self.variational.mapping(x))
q_log_prob = tf.zeros([self.n_minibatch], dtype=tf.float32)
for i in range(self.variational.num_vars):
q_log_prob += self.variational.log_prob_zi(i, self.samples)
self.losses = self.model.log_prob(x, self.samples) - q_log_prob
return -tf.reduce_mean(q_log_prob * tf.stop_gradient(self.losses))
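    # Note on the estimator above: by the score-function (REINFORCE) identity,
    #   grad_lambda E_q[f(z)] = E_q[ f(z) * grad_lambda log q(z; lambda) ],
    # the loss multiplies q_log_prob by tf.stop_gradient(self.losses): gradients
    # flow only through log q, while f(z) = log p(x, z) - log q(z) acts as a
    # constant weight.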
def build_reparam_loss(self):
"""
Loss function to minimize, whose gradient is a stochastic
gradient based on the reparameterization trick.
(Kingma and Welling, 2014)
"""
# ELBO = E_{q(z; lambda)} [ log p(x, z) - log q(z; lambda) ]
x = self.data.sample(self.n_data)
self.variational.set_params(self.variational.mapping(x))
z = self.variational.reparam(self.samples)
q_log_prob = tf.zeros([self.n_minibatch], dtype=tf.float32)
for i in range(self.variational.num_vars):
q_log_prob += self.variational.log_prob_zi(i, z)
self.losses = self.model.log_prob(x, z) - q_log_prob
return -tf.reduce_mean(self.losses)
def build_score_loss_entropy(self):
"""
Loss function to minimize, whose gradient is a stochastic
gradient based on the score function estimator.
"""
# ELBO = E_{q(z; lambda)} [ log p(x, z) ] + H(q(z; lambda))
# where entropy is analytic
x = self.data.sample(self.n_data)
self.variational.set_params(self.variational.mapping(x))
q_log_prob = tf.zeros([self.n_minibatch], dtype=tf.float32)
for i in range(self.variational.num_vars):
q_log_prob += self.variational.log_prob_zi(i, self.samples)
x = self.data.sample(self.n_data)
p_log_prob = self.model.log_prob(x, self.samples)
q_entropy = self.variational.entropy()
self.losses = p_log_prob + q_entropy
return tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_prob)) + \
q_entropy
def build_reparam_loss_entropy(self):
"""
Loss function to minimize, whose gradient is a stochastic
gradient based on the reparameterization trick.
"""
# ELBO = E_{q(z; lambda)} [ log p(x, z) ] + H(q(z; lambda))
# where entropy is analytic
x = self.data.sample(self.n_data)
self.variational.set_params(self.variational.mapping(x))
z = self.variational.reparam(self.samples)
self.losses = self.model.log_prob(x, z) + self.variational.entropy()
return -tf.reduce_mean(self.losses)
class VAE(VariationalInference):
# TODO refactor into MFVI
def __init__(self, *args, **kwargs):
VariationalInference.__init__(self, *args, **kwargs)
def initialize(self, n_data=None):
# TODO refactor to use VariationalInference's initialize()
self.n_data = n_data
# TODO don't fix number of covariates
self.x = tf.placeholder(tf.float32, [self.n_data, 28 * 28])
self.losses = tf.constant(0.0)
loss = self.build_loss()
optimizer = tf.train.AdamOptimizer(1e-2, epsilon=1.0)
# TODO move this to not rely on Pretty Tensor
self.train = pt.apply_optimizer(optimizer, losses=[loss])
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
return sess
def update(self, sess):
x = self.data.sample(self.n_data)
_, loss_value = sess.run([self.train, self.losses], {self.x: x})
return loss_value
def build_loss(self):
# ELBO = E_{q(z | x)} [ log p(x | z) ] - KL(q(z | x) || p(z))
# In general, there should be a scale factor due to data
# subsampling, so that
# ELBO = N / M * ( ELBO using x_b )
# where x^b is a mini-batch of x, with sizes M and N respectively.
# This is absorbed into the learning rate.
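        # Worked example of the scale factor: with N = 10,000 data points and
        # mini-batches of M = 100, the unbiased full-data ELBO estimate is
        # N / M = 100 times the mini-batch ELBO; since this constant only
        # rescales the gradient, it is folded into the learning rate as noted above.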
with tf.variable_scope("model") as scope:
self.variational.set_params(self.variational.mapping(self.x))
z = self.variational.sample([self.n_data, self.variational.num_vars])
self.losses = tf.reduce_sum(self.model.log_likelihood(self.x, z)) - \
kl_multivariate_normal(self.variational.m,
self.variational.s)
return -self.losses
class KLpq(VariationalInference):
"""
Kullback-Leibler divergence from posterior to variational model,
KL( p(z |x) || q(z) ).
(Cappe et al., 2008)
"""
def __init__(self, *args, **kwargs):
VariationalInference.__init__(self, *args, **kwargs)
def initialize(self, n_minibatch=1, *args, **kwargs):
self.n_minibatch = n_minibatch
self.samples = tf.placeholder(shape=(self.n_minibatch, self.variational.num_vars),
dtype=tf.float32,
name='samples')
return VariationalInference.initialize(self, *args, **kwargs)
def update(self, sess):
samples = self.variational.sample(self.samples.get_shape(), sess)
_, loss = sess.run([self.train, self.losses], {self.samples: samples})
return loss
def build_loss(self):
"""
Loss function to minimize, whose gradient is a stochastic
gradient inspired by adaptive importance sampling.
"""
# loss = E_{q(z; lambda)} [ w_norm(z; lambda) *
# ( log p(x, z) - log q(z; lambda) ) ]
# where
# w_norm(z; lambda) = w(z; lambda) / sum_z( w(z; lambda) )
# w(z; lambda) = p(x, z) / q(z; lambda)
#
# gradient = - E_{q(z; lambda)} [ w_norm(z; lambda) *
# grad_{lambda} log q(z; lambda) ]
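        # In words: draw z_1..z_B from q, weight each sample by
        #   w_b = p(x, z_b) / q(z_b; lambda), normalized so that sum_b w_norm_b = 1
        # (done in log space below for numerical stability), and use these weights
        # as per-sample multipliers on log q inside the surrogate loss, so samples
        # the posterior favours relative to q contribute proportionally more.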
x = self.data.sample(self.n_data)
self.variational.set_params(self.variational.mapping(x))
q_log_prob = tf.zeros([self.n_minibatch], dtype=tf.float32)
for i in range(self.variational.num_vars):
q_log_prob += self.variational.log_prob_zi(i, self.samples)
# 1/B sum_{b=1}^B grad_log_q * w_norm
# = 1/B sum_{b=1}^B grad_log_q * exp{ log(w_norm) }
log_w = self.model.log_prob(x, self.samples) - q_log_prob
# normalized log importance weights
log_w_norm = log_w - log_sum_exp(log_w)
w_norm = tf.exp(log_w_norm)
self.losses = w_norm * log_w
return -tf.reduce_mean(q_log_prob * tf.stop_gradient(w_norm))
class MAP(VariationalInference):
"""
Maximum a posteriori
"""
def __init__(self, model, data=Data(), transform=tf.identity):
variational = PointMass(model.num_vars, transform)
VariationalInference.__init__(self, model, variational, data)
def build_loss(self):
x = self.data.sample(self.n_data)
self.variational.set_params(self.variational.mapping(x))
z = self.variational.get_params()
self.losses = self.model.log_prob(x, z)
return -tf.reduce_mean(self.losses)
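# Minimal usage sketch (illustrative only -- the model/data objects and the
# training loop provided by VariationalInference are defined elsewhere):
#   inference = MAP(model, data)        # or one of the classes above
#   sess = inference.initialize()
#   for t in range(1000):
#       loss = inference.update(sess)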
| 36.178771
| 90
| 0.603613
|
d202a5abda56b9e3577009497e4e7d84fb8f2c56
| 3,012
|
py
|
Python
|
tg/handler.py
|
pandov/myitacademy
|
e674816f325d68c84b908bf3429fda9b8d14e615
|
[
"Apache-2.0"
] | 1
|
2021-08-03T12:30:59.000Z
|
2021-08-03T12:30:59.000Z
|
tg/handler.py
|
pandov/myitacademy
|
e674816f325d68c84b908bf3429fda9b8d14e615
|
[
"Apache-2.0"
] | null | null | null |
tg/handler.py
|
pandov/myitacademy
|
e674816f325d68c84b908bf3429fda9b8d14e615
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
from io import BytesIO
from src.connections import connections
from src.dlib_wrapper import *
from src.utils import *
from pathlib import Path
# detector = dlib_cnn_face_detector()
detector = dlib_face_frontal_detector()
predictor2d = dlib_face_landmarks_predictor()
predictor3d = get_fm_depth_predictor()
landmarks_transform = get_landmarks_transform()
face_images_transform = get_face_images_transform()
num_classes = 5
predictor_fer_fc = get_fc_fer_predictor(num_classes=num_classes, hidden_size=284)
predictor_fer_cnn = get_cnn_fer_predictor(num_classes=num_classes)
predictor_fer_cascade = get_cascade_fer_predictor(num_classes=num_classes)
get_features = lambda image: get_face_features(image, detector, predictor2d, predictor3d, landmarks_transform)
CACHE_PATH = Path(__file__).absolute().parent.joinpath('cache')
CACHE_PATH.mkdir(exist_ok=True)
fer_colors = {
'anger': (231, 30, 46),
'contempt': (72, 82, 166),
'disgust': (97, 45, 145),
'fear': (25, 0, 0),
'happy': (74, 200, 71),
'sadness': (26, 97, 175),
'surprise': (251, 233, 37),
'neutral': (150, 150, 150),
}
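# Note: fer_colors stores per-expression colours as RGB tuples; get_fer_color
# below reverses each tuple to BGR, the channel order expected by OpenCV's
# drawing functions (cv2.line, cv2.putText, ...).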
def get_fer_color(name):
color = fer_colors[name]
r, g, b = color
return (b, g, r)
def backup(message, image):
savepath = CACHE_PATH.joinpath(str(message.chat.username))
savepath.mkdir(exist_ok=True)
savename = savepath.joinpath(str(message.date) + '.jpg')
cv2.imwrite(savename.as_posix(), image)
def byte2numpy(byte):
arr = np.frombuffer(byte, dtype=np.uint8)
image = cv2.imdecode(arr, cv2.IMREAD_COLOR)
return image
def numpy2byte(image):
_, data = cv2.imencode('.jpg', image)
bio = BytesIO(data)
bio.name = 'image.jpeg'
bio.seek(0)
return bio
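# byte2numpy / numpy2byte are inverse helpers: incoming photos arrive as raw
# JPEG bytes and are decoded into a BGR numpy array for OpenCV processing,
# then re-encoded into an in-memory JPEG (BytesIO) before being sent back.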
def print_faces(image, rectangles, landmarks, expressions, mesh=False, rect=False):
for i, rectangle in enumerate(rectangles):
x, y, w, h = dlib_rectangle_to_numpy_box(image.shape, rectangle)
output = expressions['output'][i]
if mesh:
color = get_fer_color(output)
for j, k in connections:
cv2.line(image, tuple(landmarks[i][j]), tuple(landmarks[i][k]), color, 1, cv2.LINE_AA)
if rect:
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 1)
for n, (expression, values) in enumerate(expressions.items()):
if expression == 'output': continue
scale = 0.45
if expression == output:
expression = '*' + expression
scale = 0.5
value = float(values[i])
text = '{} {}'.format(expression, round(value, 2))
tx, ty = x + w // 5, y + n * 15 - h // 4
cv2.putText(image, text, (tx, ty), cv2.FONT_HERSHEY_SIMPLEX, scale, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image, text, (tx, ty), cv2.FONT_HERSHEY_SIMPLEX, scale, (255, 255, 255), 1, cv2.LINE_AA)
# cv2.rectangle(image, (tx, ty), (tx + w // 3, ty + h // 4 - 10), (0, 0, 0, 50), -1)
| 37.185185
| 112
| 0.64741
|
1e8dff84bf260e37d0a7da4269b967a651c8336e
| 147
|
py
|
Python
|
capitulo-05/ex05.py
|
bryan-lima/exercicios-livro-introd-prog-python-3ed
|
b6bc26dced9728510865704a80cb0d97f81f756b
|
[
"MIT"
] | 3
|
2021-11-09T17:54:10.000Z
|
2022-01-30T22:32:25.000Z
|
capitulo-05/ex05.py
|
bryan-lima/exercicios-livro-introd-prog-python-3ed
|
b6bc26dced9728510865704a80cb0d97f81f756b
|
[
"MIT"
] | null | null | null |
capitulo-05/ex05.py
|
bryan-lima/exercicios-livro-introd-prog-python-3ed
|
b6bc26dced9728510865704a80cb0d97f81f756b
|
[
"MIT"
] | null | null | null |
# Rewrite the previous program to print the first 10 multiples of 3
lastNumber = 30
n = 3
while n <= lastNumber:
print(n)
n += 3
| 18.375
| 76
| 0.687075
|
7df9ca3daf76852d74d8144f772a538efc1a64f0
| 447
|
py
|
Python
|
summary.py
|
TheEvolt/yolo3-pytorch
|
4c31e8ab3e619dcd93b9d1dc11b89aa252bca84b
|
[
"MIT"
] | null | null | null |
summary.py
|
TheEvolt/yolo3-pytorch
|
4c31e8ab3e619dcd93b9d1dc11b89aa252bca84b
|
[
"MIT"
] | null | null | null |
summary.py
|
TheEvolt/yolo3-pytorch
|
4c31e8ab3e619dcd93b9d1dc11b89aa252bca84b
|
[
"MIT"
] | null | null | null |
# --------------------------------------------#
#   This section of code is used to inspect the network structure
# --------------------------------------------#
import torch
from torchsummary import summary
from nets.yolo import YoloBody
if __name__ == "__main__":
    # Use device to specify whether the network runs on the GPU or the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
m = YoloBody([[6, 7, 8], [3, 4, 5], [0, 1, 2]], 80).to(device)
summary(m, input_size=(3, 416, 416))
| 31.928571
| 74
| 0.505593
|
a2f4149c89e5e951abb08e29d11c6a259872a1b0
| 13,695
|
py
|
Python
|
pelita/maze_generator.py
|
DanBenHa/pelita
|
557c3a757a24e0f1abe25f7edf5c4ffee83a077e
|
[
"BSD-2-Clause"
] | null | null | null |
pelita/maze_generator.py
|
DanBenHa/pelita
|
557c3a757a24e0f1abe25f7edf5c4ffee83a077e
|
[
"BSD-2-Clause"
] | null | null | null |
pelita/maze_generator.py
|
DanBenHa/pelita
|
557c3a757a24e0f1abe25f7edf5c4ffee83a077e
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Generate maze layouts for 'pelita', without dead ends.
Algorithm:
* start with an empty grid
* draw a wall with gaps, dividing the grid in 2
* repeat recursively for each sub-grid
* find dead ends
* remove a wall at the dead ends
Players 1,3 always start in the bottom left; 2,4 in the top right
Food is placed randomly (though not too close to the pacmen starting positions)
Notes:
the final map includes a symmetric, flipped copy
the first wall has k gaps, the next wall has k/2 gaps, etc. (min=1)
Inspired by code by Dan Gillick
Completely rewritten by Pietro Berkes
Rewritten again (but not completely) by Tiziano Zito
"""
import random
import sys
import numpy
import networkx as nx
north = (0, -1)
south = (0, 1)
east = (1, 0)
west = (-1, 0)
# character constants for walls, food, and empty spaces
W = b'#'
F = b'.'
E = b' '
def empty_maze(height, width):
"""Return an empty maze with external walls.
A maze is a 2D array of characters representing walls, food, and agents.
An empty maze is made of empty tiles, except for the external walls.
"""
maze = numpy.empty((height, width), dtype='c')
maze.fill(E)
# add external walls
maze[0, :].fill(W)
maze[-1, :].fill(W)
maze[:, 0].fill(W)
maze[:, -1].fill(W)
return maze
def maze_to_bytes(maze):
"""Return bytes representation of maze."""
lines = [b''.join(maze[i,:])
for i in range(maze.shape[0])]
return b'\n'.join(lines)
def maze_to_str(maze):
"""Return a ascii-string representation of maze."""
bytes_ = maze_to_bytes(maze)
return bytes_.decode('ascii')
def bytes_to_maze(bytes_):
"""Return a maze numpy bytes array from a bytes representation."""
rows = []
for line in bytes_.splitlines():
line = line.strip()
if len(line) == 0:
# skip empty lines
continue
cols = []
for idx in range(len(line.strip())):
            # this craziness is needed because bytes do not iterate like
# strings: see the comments about iterating over bytes in
# https://docs.python.org/3/library/stdtypes.html#bytes-objects
cols.append(line[idx:idx+1])
rows.append(cols)
maze = numpy.array(rows, dtype=bytes)
return maze
def str_to_maze(str_):
"""Return a maze numpy bytes array from a ascii string representation."""
bytes_maze = str_.encode('ascii')
return bytes_to_maze(bytes_maze)
def create_half_maze(maze, ngaps_center):
"""Fill the left half of the maze with random walls.
The second half can be created by mirroring the left part using
the 'complete_maze' function.
"""
# first, we need a wall in the middle
# the gaps in the central wall have to be chosen such that they can
# be mirrored
ch = maze.shape[0] - 2
candidates = list(range(ch//2))
random.shuffle(candidates)
half_gaps_pos = candidates[:ngaps_center // 2]
gaps_pos = []
for pos in half_gaps_pos:
gaps_pos.append(pos)
gaps_pos.append(ch - pos - 1)
# make wall
_add_wall_at(maze, (maze.shape[1] - 2) // 2 - 1, ngaps_center,
vertical=True, gaps_pos=gaps_pos)
# then, fill the left half with walls
_add_wall(maze[:, :maze.shape[1] // 2], ngaps_center // 2, vertical=False)
def _add_wall_at(maze, pos, ngaps, vertical, gaps_pos=None):
"""
add a wall with gaps
maze -- maze where to place wall, plus a border of one element
pos -- position where to put the wall within the center of the maze
(border excluded)
"""
if not vertical:
maze = maze.T
center = maze[1:-1, 1:-1]
ch, cw = center.shape
# place wall
center[:, pos].fill(W)
# place gaps
ngaps = max(1, ngaps)
# choose position of gaps if necessary
if gaps_pos is None:
        # choose random positions
gaps_pos = list(range(ch))
random.shuffle(gaps_pos)
gaps_pos = gaps_pos[:ngaps]
# do not block entrances
if maze[0][pos + 1] == E:
gaps_pos.insert(0, 0)
if maze[-1][pos + 1] == E:
gaps_pos.insert(0, ch - 1)
for gp in gaps_pos:
center[gp, pos] = E
sub_mazes = [maze[:, :pos + 2], maze[:, pos + 1:]]
if not vertical:
sub_mazes = [sm.T for sm in sub_mazes]
return sub_mazes
def _add_wall(maze, ngaps, vertical):
"""Recursively build the walls of the maze.
grid -- 2D array of characters representing the maze
ngaps -- number of empty spaces to leave in the wall
vertical -- if True, create a vertical wall, otherwise horizontal
"""
h, w = maze.shape
center = maze[1:-1, 1:-1]
ch, cw = center.shape
# no space for walls, interrupt recursion
if ch < 3 and cw < 3:
return
size = cw if vertical else ch
# create a wall only if there is some space in this direction
min_size = random.randint(3, 5)
if size >= min_size:
# place the wall at random spot
pos = random.randint(1, size-2)
sub_mazes = _add_wall_at(maze, pos, ngaps, vertical)
# recursively add walls
for sub_maze in sub_mazes:
_add_wall(sub_maze, max(1, ngaps // 2), not vertical)
def walls_to_graph(maze):
"""Transform a maze in a graph.
The data on the nodes correspond to their coordinates, data on edges is
the actions to take to transition to that edge.
Returns:
graph -- a Graph
first_node -- the first node in the Graph
"""
h, w = maze.shape
directions = [west, east, north, south]
graph = nx.Graph()
# define nodes for maze
for x in range(w):
for y in range(h):
if maze[y, x] != W:
graph.add_node((x,y))
# this is a free position, get its neighbors too
for dx, dy in directions:
nbx, nby = (x+dx, y+dy)
# do not go out of bounds
try:
if maze[nby, nbx] == E:
graph.add_edge((x, y), (nbx, nby))
except IndexError:
# this move brought us out of the maze, just ignore it
continue
return graph
def find_dead_ends(graph, width):
"""Find dead ends in a graph."""
dead_ends = []
for node in graph.nodes():
if graph.degree(node) == 1 and node[0] < width-1:
dead_ends.append(node)
return dead_ends
def remove_dead_end(dead_node, maze):
"""Remove one dead end in a maze."""
h, w = maze.shape
# loop through the neighboring positions and remove the first wall we find
# as long as it is not on the outer border or in the middle of the maze
# not in the central wall x==w//2-1
directions = (north, south, east, west)
for direction in directions:
nbx = dead_node[0]+direction[0]
nby = dead_node[1]+direction[1]
if nbx not in (0,w-1,w//2-1) and nby not in (0,h-1):
neighbor = maze[nby, nbx]
if neighbor == W:
maze[nby, nbx] = E
break
def remove_all_dead_ends(maze):
height, width = maze.shape
while True:
maze_graph = walls_to_graph(maze)
dead_ends = find_dead_ends(maze_graph, width)
if len(dead_ends) == 0:
break
remove_dead_end(dead_ends[0], maze)
def find_chamber(graph):
"""Detect chambers (rooms with a single square entrance).
Return (entrance, chamber), where `entrance` is the node representing the
entrance to the chamber (None if no chamber is found), and `chamber` is the
list of nodes within the chamber (empty list if no nodes are in the chamber).
The entrance to a chamber is a node that when removed from the graph
will result in the graph to be split into two disconnected graphs."""
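    # Example (assuming the rest of the maze is otherwise well connected): a 2x2
    # room whose only opening is a single corridor cell -- that corridor cell is
    # returned as `entrance` and the four room cells make up `chamber`.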
# minimum_node_cut returns a set of nodes of minimum cardinality that
# disconnects the graph. This means that we have a chamber if the length
# of this set is one, i.e. there is one node that when removed disconnects
# the graph
cuts = nx.minimum_node_cut(graph)
if len(cuts) > 1:
# no chambers, yeah!
return None, []
entrance = cuts.pop()
# remove the cut, i.e. put a wall on the entrance
lgraph = nx.restricted_view(graph, [entrance],[])
# now get the resulting subgraphs
subgraphs = sorted(nx.connected_components(lgraph), key=len)
# let's get the smallest subgraph: this is going to be a chamber
    # (other subgraphs are other chambers, if any, and the 'rest' of the graph)
# return a list of nodes, instead of a set
chamber = list(subgraphs[0])
return entrance, chamber
def get_neighboring_walls(maze, locs):
"""Given a list of coordinates in the maze, return all neighboring walls.
Walls on the outer border are ignored automatically."""
height, width = maze.shape
walls = []
seen = []
for nodex, nodey in locs:
# if we are already on the border, skip this node
if nodex<=0 or nodex>=(width-1) or nodey<=0 or nodey>=(height-1):
continue
# explore all directions around the current node
for dirx, diry in (north, south, east, west):
# get coordinates of neighbor in direction (dirx, diry)
adjx, adjy = nodex+dirx, nodey+diry
if (adjx, adjy) in seen:
# we have visited this neighbor already
continue
else:
seen.append((adjx, adjy))
# check that we still are inside the maze
if adjx<=0 or adjx>=(width-1) or adjy<=0 or adjy>=(height-1):
# the neighbor is out of the maze
continue
if maze[adjy,adjx] == W:
# this is a wall, store it
walls.append((adjx, adjy))
return walls
def remove_all_chambers(maze):
maze_graph = walls_to_graph(maze)
# this will find one of the chambers, if there is any
entrance, chamber = find_chamber(maze_graph)
while entrance is not None:
# get all the walls around the chamber
walls = get_neighboring_walls(maze, chamber)
# choose a wall at random among the neighboring one and get rid of it
bad_wall = random.choice(walls)
maze[bad_wall[1], bad_wall[0]] = E
# we may have opened a door into this chamber, but there may be more
# chambers to get rid of. Or, the wall we picked wasn't good enough and
        # didn't really open a new door to the chamber. There is no easy way to
        # distinguish these two cases; if we could, we would spare quite
        # a few iterations here.
# Well, as long as we keep on doing this we will eventually get rid
# of all the chambers
maze_graph = walls_to_graph(maze)
entrance, chamber = find_chamber(maze_graph)
def add_food(maze, max_food):
"""Add max_food pellets on the left side of the maze.
We exclude the pacmen's starting positions and the central dividing border
"""
if max_food == 0:
# no food needs to be added, return here
return
h, w = maze.shape
pacmen = [(1,h-2), (1,h-3)]
# get all free slots on the left side, excluding the dividing border
free_y, free_x = numpy.where(maze[:,:w//2-1] == E)
# convert it to a list of coordinate tuples
free = list(zip(free_x, free_y))
# remove the pacmen starting coordinates (we have to check that they are
    # indeed free before trying to remove them)
[free.remove(pacman) for pacman in pacmen if pacman in free]
# check if we have any free slots left
if len(free) == 0 and max_food > 0:
raise ValueError(f'No space left for food in maze')
elif max_food > len(free):
# check if we can indeed fit so much food in the maze
        raise ValueError(f'Cannot fit {max_food} pellets in {len(free)} free slots')
elif max_food < 0:
        raise ValueError(f'Cannot add a negative number of food pellets ({max_food} given)')
# now take max_food random positions out of this list
food = random.sample(free, max_food)
# fit it in the maze
for col, row in food:
maze[row, col] = F
def add_pacmen(maze):
## starting pacmen positions
maze[-2, 1] = b'2'
maze[-3, 1] = b'0'
maze[1, -2] = b'3'
maze[2, -2] = b'1'
def get_new_maze(height, width, nfood, seed=None):
"""Create a new maze in text format.
The maze is created with a recursive creation algorithm. The maze part of
the blue team is a center-mirror version of the one for the red team.
    The function reserves space for two PacMen per team in the upper-right
    and lower-left corners of the maze. Food is added at random.
Input arguments:
height, width -- the size of the maze, including the outer walls
nfood -- number of food dots for each team
seed -- if not None, the random seed used to generate the maze
"""
if width%2 != 0:
raise ValueError(f'Width must be even ({width} given)')
if seed is None:
seed = random.randint(1, 2 ** 31 - 1)
random.seed(seed)
maze = empty_maze(height, width)
create_half_maze(maze, height // 2)
# make space for pacman (2 pacman each)
maze[-2, 1] = E
maze[-3, 1] = E
# remove dead ends
remove_all_dead_ends(maze)
# remove chambers
remove_all_chambers(maze)
# add food
add_food(maze, nfood)
# complete right part of maze with mirror copy
maze[:, width // 2:] = numpy.flipud(numpy.fliplr(maze[:, :width // 2]))
# add pacman
add_pacmen(maze)
return maze_to_str(maze)
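if __name__ == '__main__':
    # Quick demo with illustrative parameters: a reproducible 16x32 layout
    # with 15 food pellets per team (width must be even).
    print(get_new_maze(16, 32, nfood=15, seed=42))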
| 32.147887
| 84
| 0.623512
|
d97485675f99c8ef7071a4e23a53b07780e7c4fa
| 881
|
py
|
Python
|
deeppavlov/core/models/nn_model.py
|
khakhulin/DeepPavlov
|
5f631cc887aa935f2e67b7c65a19c7e777cf7db7
|
[
"Apache-2.0"
] | 1
|
2018-07-18T11:50:45.000Z
|
2018-07-18T11:50:45.000Z
|
deeppavlov/core/models/nn_model.py
|
khakhulin/DeepPavlov
|
5f631cc887aa935f2e67b7c65a19c7e777cf7db7
|
[
"Apache-2.0"
] | null | null | null |
deeppavlov/core/models/nn_model.py
|
khakhulin/DeepPavlov
|
5f631cc887aa935f2e67b7c65a19c7e777cf7db7
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
from .component import Component
from .serializable import Serializable
class NNModel(Component, Serializable):
@abstractmethod
def train_on_batch(self, x: list, y: list):
pass
def process_event(self, event_name, data):
pass
| 29.366667
| 72
| 0.76504
|
bd5be189c12eb187b2e4b895f42d9ee4e79c85b0
| 1,683
|
py
|
Python
|
publictitles/resources.py
|
LandRegistry/public-titles
|
1d52e5dd80e4632d98f40356262819bbf5c907ed
|
[
"MIT"
] | null | null | null |
publictitles/resources.py
|
LandRegistry/public-titles
|
1d52e5dd80e4632d98f40356262819bbf5c907ed
|
[
"MIT"
] | null | null | null |
publictitles/resources.py
|
LandRegistry/public-titles
|
1d52e5dd80e4632d98f40356262819bbf5c907ed
|
[
"MIT"
] | null | null | null |
from flask import Response
from flask.ext.restful import Resource, fields, marshal_with, abort, reqparse
from publictitles.models import Title
from publictitles import app, db
class TitleResource(Resource):
resource_fields = { 'title_number': fields.String,
'house_number': fields.String,
'road': fields.String,
'town': fields.String,
'postcode': fields.String,
'price_paid': fields.String}
def __init__(self):
self.parser = reqparse.RequestParser()
for key, val in TitleResource.resource_fields.items():
self.parser.add_argument(key, type=str)
@marshal_with(resource_fields)
def get(self, title_number):
        title = Title.query.filter_by(title_number=title_number).first()
if title:
return title
else:
abort(404, message="Title number {} doesn't exist".format(title_number))
def put(self, title_number):
args = self.parser.parse_args()
        existing_title = Title.query.filter_by(title_number=args['title_number']).first()
status = 201
if existing_title:
app.logger.info('Title number %s already exists. Replace with %s' % (args['title_number'], args))
db.session.delete(existing_title)
db.session.commit()
status = 200
app.logger.info('Create title with args %s' % args)
title = Title(**args)
        db.session.add(title)
        db.session.commit()
        return Response(status=status)
| 38.25
| 109
| 0.582294
|
077b7075a41e205a16b2d53d41eb915518d3f483
| 4,252
|
py
|
Python
|
azure-mgmt-dns/azure/mgmt/dns/v2016_04_01/models/record_set.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-dns/azure/mgmt/dns/v2016_04_01/models/record_set.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-dns/azure/mgmt/dns/v2016_04_01/models/record_set.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RecordSet(Model):
"""Describes a DNS record set (a collection of DNS records with the same name
and type).
:param id: The ID of the record set.
:type id: str
:param name: The name of the record set.
:type name: str
:param type: The type of the record set.
:type type: str
:param etag: The etag of the record set.
:type etag: str
:param metadata: The metadata attached to the record set.
:type metadata: dict[str, str]
:param ttl: The TTL (time-to-live) of the records in the record set.
:type ttl: long
:param arecords: The list of A records in the record set.
:type arecords: list[~azure.mgmt.dns.v2016_04_01.models.ARecord]
:param aaaa_records: The list of AAAA records in the record set.
:type aaaa_records: list[~azure.mgmt.dns.v2016_04_01.models.AaaaRecord]
:param mx_records: The list of MX records in the record set.
:type mx_records: list[~azure.mgmt.dns.v2016_04_01.models.MxRecord]
:param ns_records: The list of NS records in the record set.
:type ns_records: list[~azure.mgmt.dns.v2016_04_01.models.NsRecord]
:param ptr_records: The list of PTR records in the record set.
:type ptr_records: list[~azure.mgmt.dns.v2016_04_01.models.PtrRecord]
:param srv_records: The list of SRV records in the record set.
:type srv_records: list[~azure.mgmt.dns.v2016_04_01.models.SrvRecord]
:param txt_records: The list of TXT records in the record set.
:type txt_records: list[~azure.mgmt.dns.v2016_04_01.models.TxtRecord]
:param cname_record: The CNAME record in the record set.
:type cname_record: ~azure.mgmt.dns.v2016_04_01.models.CnameRecord
:param soa_record: The SOA record in the record set.
:type soa_record: ~azure.mgmt.dns.v2016_04_01.models.SoaRecord
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': '{str}'},
'ttl': {'key': 'properties.TTL', 'type': 'long'},
'arecords': {'key': 'properties.ARecords', 'type': '[ARecord]'},
'aaaa_records': {'key': 'properties.AAAARecords', 'type': '[AaaaRecord]'},
'mx_records': {'key': 'properties.MXRecords', 'type': '[MxRecord]'},
'ns_records': {'key': 'properties.NSRecords', 'type': '[NsRecord]'},
'ptr_records': {'key': 'properties.PTRRecords', 'type': '[PtrRecord]'},
'srv_records': {'key': 'properties.SRVRecords', 'type': '[SrvRecord]'},
'txt_records': {'key': 'properties.TXTRecords', 'type': '[TxtRecord]'},
'cname_record': {'key': 'properties.CNAMERecord', 'type': 'CnameRecord'},
'soa_record': {'key': 'properties.SOARecord', 'type': 'SoaRecord'},
}
def __init__(self, **kwargs):
super(RecordSet, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
self.etag = kwargs.get('etag', None)
self.metadata = kwargs.get('metadata', None)
self.ttl = kwargs.get('ttl', None)
self.arecords = kwargs.get('arecords', None)
self.aaaa_records = kwargs.get('aaaa_records', None)
self.mx_records = kwargs.get('mx_records', None)
self.ns_records = kwargs.get('ns_records', None)
self.ptr_records = kwargs.get('ptr_records', None)
self.srv_records = kwargs.get('srv_records', None)
self.txt_records = kwargs.get('txt_records', None)
self.cname_record = kwargs.get('cname_record', None)
self.soa_record = kwargs.get('soa_record', None)
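# Example (illustrative values only; keyword arguments mirror the attribute
# map above):
#   record_set = RecordSet(ttl=3600, metadata={'env': 'prod'})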
| 49.44186
| 82
| 0.630997
|
50be50c7f3029a8436bff1d8f72cd6fad42340a4
| 6,974
|
py
|
Python
|
Detector/train.py
|
ColinWine/Accurate-and-rapid-pulmonary-tuberculosis-diagnosis-system
|
7be433b3a495a7c4db2b850a79dc505e413909c4
|
[
"Apache-2.0"
] | null | null | null |
Detector/train.py
|
ColinWine/Accurate-and-rapid-pulmonary-tuberculosis-diagnosis-system
|
7be433b3a495a7c4db2b850a79dc505e413909c4
|
[
"Apache-2.0"
] | null | null | null |
Detector/train.py
|
ColinWine/Accurate-and-rapid-pulmonary-tuberculosis-diagnosis-system
|
7be433b3a495a7c4db2b850a79dc505e413909c4
|
[
"Apache-2.0"
] | 1
|
2022-02-19T09:07:55.000Z
|
2022-02-19T09:07:55.000Z
|
import os
import sys
import time
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = '0'
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch.distributed as dist
from datasets.coco import COCO, COCO_eval
from datasets.pascal import PascalVOC, PascalVOC_eval
from nets.hourglass import get_hourglass
from utils.utils import _tranpose_and_gather_feature, load_model
from utils.image import transform_preds
from utils.losses import _neg_loss, _reg_loss
from utils.summary import create_summary, create_logger, create_saver, DisablePrint
from utils.post_process import ctdet_decode
from validation import evaluation
# Training settings
parser = argparse.ArgumentParser(description='simple_centernet45')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dist', action='store_true')
parser.add_argument('--root_dir', type=str, default='./')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--log_name', type=str, default='test')
parser.add_argument('--pretrain_name', type=str, default='pretrain')
parser.add_argument('--dataset', type=str, default='coco', choices=['coco', 'pascal'])
parser.add_argument('--arch', type=str, default='large_hourglass')
parser.add_argument('--img_size', type=int, default=512)
parser.add_argument('--split_ratio', type=float, default=1.0)
parser.add_argument('--lr', type=float, default=5e-4)
parser.add_argument('--lr_step', type=str, default='40,80,120')
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--num_epochs', type=int, default=160)
parser.add_argument('--test_topk', type=int, default=100)
parser.add_argument('--log_interval', type=int, default=60)
parser.add_argument('--val_interval', type=int, default=5)
parser.add_argument('--num_workers', type=int, default=2)
cfg = parser.parse_args()
os.chdir(cfg.root_dir)
cfg.log_dir = os.path.join(cfg.root_dir, 'logs', cfg.log_name)
cfg.ckpt_dir = os.path.join(cfg.root_dir, 'ckpt', cfg.log_name)
cfg.pretrain_dir = os.path.join(cfg.root_dir, 'ckpt', cfg.pretrain_name, 'checkpoint.t7')
os.makedirs(cfg.log_dir, exist_ok=True)
os.makedirs(cfg.ckpt_dir, exist_ok=True)
cfg.lr_step = [int(s) for s in cfg.lr_step.split(',')]
def main():
saver = create_saver(cfg.local_rank, save_dir=cfg.ckpt_dir)
logger = create_logger(cfg.local_rank, save_dir=cfg.log_dir)
summary_writer = create_summary(cfg.local_rank, log_dir=cfg.log_dir)
print = logger.info
print(cfg)
torch.manual_seed(317)
torch.backends.cudnn.benchmark = True # disable this if OOM at beginning of training
num_gpus = torch.cuda.device_count()
if cfg.dist:
cfg.device = torch.device('cuda:%d' % cfg.local_rank)
torch.cuda.set_device(cfg.local_rank)
dist.init_process_group(backend='nccl', init_method='env://',
world_size=num_gpus, rank=cfg.local_rank)
else:
cfg.device = torch.device('cuda')
print('Setting up data...')
Dataset = COCO if cfg.dataset == 'coco' else PascalVOC
train_dataset = Dataset(cfg.data_dir, 'train', split_ratio=cfg.split_ratio, img_size=cfg.img_size)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,
num_replicas=num_gpus,
rank=cfg.local_rank)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=cfg.batch_size // num_gpus
if cfg.dist else cfg.batch_size,
shuffle=not cfg.dist,
num_workers=cfg.num_workers,
pin_memory=True,
drop_last=True,
sampler=train_sampler if cfg.dist else None)
print('Creating model...')
if 'hourglass' in cfg.arch:
model = get_hourglass[cfg.arch]
else:
raise NotImplementedError
if cfg.dist:
# model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.to(cfg.device)
model = nn.parallel.DistributedDataParallel(model,
device_ids=[cfg.local_rank, ],
output_device=cfg.local_rank)
else:
model = nn.DataParallel(model).to(cfg.device)
if os.path.isfile(cfg.pretrain_dir):
model = load_model(model, cfg.pretrain_dir)
optimizer = torch.optim.Adam(model.parameters(), cfg.lr)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, cfg.lr_step, gamma=0.1)
validation_folder=r'L:\FullProcess\FocusDetection\LabelVal'
def train(epoch):
print('\n Epoch: %d' % epoch)
model.train()
tic = time.perf_counter()
for batch_idx, batch in enumerate(train_loader):
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=cfg.device, non_blocking=True)
outputs = model(batch['image'])
hmap, regs, w_h_ = zip(*outputs)
regs = [_tranpose_and_gather_feature(r, batch['inds']) for r in regs]
w_h_ = [_tranpose_and_gather_feature(r, batch['inds']) for r in w_h_]
hmap_loss = _neg_loss(hmap, batch['hmap'])
reg_loss = _reg_loss(regs, batch['regs'], batch['ind_masks'])
w_h_loss = _reg_loss(w_h_, batch['w_h_'], batch['ind_masks'])
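            # Total objective: penalty-reduced focal loss on the centre heatmap
            # plus L1 losses on the centre offsets and box sizes, with the size
            # term down-weighted by 0.1 as in the CenterNet objective.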
loss = hmap_loss + 1 * reg_loss + 0.1 * w_h_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % cfg.log_interval == 0:
duration = time.perf_counter() - tic
tic = time.perf_counter()
print('[%d/%d-%d/%d] ' % (epoch, cfg.num_epochs, batch_idx, len(train_loader)) +
' hmap_loss= %.5f reg_loss= %.5f w_h_loss= %.5f' %
(hmap_loss.item(), reg_loss.item(), w_h_loss.item()) +
' (%d samples/sec)' % (cfg.batch_size * cfg.log_interval / duration))
step = len(train_loader) * epoch + batch_idx
summary_writer.add_scalar('hmap_loss', hmap_loss.item(), step)
summary_writer.add_scalar('reg_loss', reg_loss.item(), step)
summary_writer.add_scalar('w_h_loss', w_h_loss.item(), step)
return
print('Starting training...')
for epoch in range(1, cfg.num_epochs + 1):
train_sampler.set_epoch(epoch)
train(epoch)
if cfg.val_interval > 0 and epoch % cfg.val_interval == 0:
model.eval()
evaluation(model,validation_folder,map_save_name='epoch_' + str(epoch) + '.png')
print(saver.save(model.module.state_dict(), 'checkpoint'))
lr_scheduler.step(epoch) # move to here after pytorch1.1.0
summary_writer.close()
if __name__ == '__main__':
with DisablePrint(local_rank=cfg.local_rank):
main()
| 38.530387
| 100
| 0.659736
|
22c9daeaa0f648562e27b6593c6954571ab20327
| 2,778
|
py
|
Python
|
Web App/SignLanguage/detect.py
|
Aravindhan-G/Sign-Language-Synthesis
|
6f306a3055419eedfe0a6e877ee8ee6967cfa06c
|
[
"MIT"
] | null | null | null |
Web App/SignLanguage/detect.py
|
Aravindhan-G/Sign-Language-Synthesis
|
6f306a3055419eedfe0a6e877ee8ee6967cfa06c
|
[
"MIT"
] | null | null | null |
Web App/SignLanguage/detect.py
|
Aravindhan-G/Sign-Language-Synthesis
|
6f306a3055419eedfe0a6e877ee8ee6967cfa06c
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template, Response
import cv2
import time
import pickle
import numpy as np
import mediapipe as mp
detect= Blueprint('detect',__name__)
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
drawing_styles = mp.solutions.drawing_styles
mod = pickle.load(open('', 'rb'))#Location of XGBoost.sav
le = pickle.load(open('', 'rb'))#Location of XEncoder1.sav
word = ' '
cap = cv2.VideoCapture(0)
def gen_frames():
global word
prev = time.time()
with mp_hands.Hands(min_detection_confidence=0.67, min_tracking_confidence=0.6) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
break
else:
curr = time.time()
image = cv2.cvtColor(cv2.flip(image,1), cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
lst = []
for coords in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image, coords, mp.solutions.hands.HAND_CONNECTIONS,
drawing_styles.get_default_hand_landmarks_style(),
drawing_styles.get_default_hand_connections_style())
for crd in coords.landmark:
lst.append(crd.x)
lst.append(crd.y)
lst.append(crd.z)
lst = np.asarray(lst).reshape(1,-1)
if (curr-prev) >= 4:
prev = time.time()
txt = predict(lst)
                        if txt == 'del': word = word[:-1]
                        elif txt == 'space': word += ' '
                        else: word += txt
ret, buffer = cv2.imencode('.jpg', image)
frame = buffer.tobytes()
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
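# gen_frames yields an MJPEG stream: each iteration emits one JPEG frame wrapped
# in a multipart part whose '--frame' boundary matches the boundary declared in
# the /video_feed Response below, so the browser replaces the image in place.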
def predict(coords):
tmp = mod.predict(coords)
txt = str(le.inverse_transform(tmp)).strip("'][")
return txt
@detect.route('/home', methods=['GET','POST'])
def home():
return render_template('index.html')
@detect.route('/video_feed')
def video_feed():
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@detect.route('/text_feed')
def text_feed():
return f'Predicted text - {word}'
| 36.077922
| 94
| 0.537077
|
5f3a4f37cf22ce1c6cb864631d290aa8ea0f80d9
| 3,584
|
py
|
Python
|
2_ Preprocessing/properties/MissingData.py
|
DavidStahl97/Predictive-Analytics
|
c607ef2a5fc2b6654122b0b5ef90cb1a23f88eb0
|
[
"MIT"
] | null | null | null |
2_ Preprocessing/properties/MissingData.py
|
DavidStahl97/Predictive-Analytics
|
c607ef2a5fc2b6654122b0b5ef90cb1a23f88eb0
|
[
"MIT"
] | null | null | null |
2_ Preprocessing/properties/MissingData.py
|
DavidStahl97/Predictive-Analytics
|
c607ef2a5fc2b6654122b0b5ef90cb1a23f88eb0
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import missingno as msno
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
def review_data():
# Read csv file into pandas dataframe
propertydata = pd.read_csv("propertydata.csv")
# Print first five rows
print(propertydata.head())
# Print whole table
print(propertydata)
# Print all columns separately
print(propertydata['ST_NUM'])
print(propertydata['ST_NAME'])
print(propertydata['OWN_OCCUPIED'])
print(propertydata['NUM_BEDROOMS'])
print(propertydata['NUM_BATH'])
print(propertydata['SQ_FT'])
# Visualize gaps in data
msno.matrix(propertydata)
plt.show()
msno.matrix(propertydata)
msno.bar(propertydata, color="blue")
plt.show()
def preprocess_data():
# Read csv file into pandas dataframe, replace missing values with NaN
# Handle NUM_BEDROOMS and SQ_FT
propertydata = pd.read_csv("propertydata.csv", na_values=["na", "--"])
print(propertydata['ST_NAME'])
print(propertydata['NUM_BEDROOMS'])
print(propertydata['SQ_FT'])
# Handle OWN_OCCUPIED
cnt = 0
for row in propertydata['OWN_OCCUPIED']:
try:
# Try to cast value to int
int(row)
# If possible, replace that value
propertydata.loc[cnt, 'OWN_OCCUPIED'] = np.nan
except ValueError:
pass
cnt += 1
print(propertydata['OWN_OCCUPIED'])
# Handle NUM_BATH
cnt = 0
for row in propertydata['NUM_BATH']:
try:
# Try to cast value to float
float(row)
except ValueError:
# If NOT possible, replace that value
propertydata.loc[cnt, 'NUM_BATH'] = np.nan
cnt += 1
print(propertydata['NUM_BATH'])
# Show missing value matrix
msno.matrix(propertydata)
plt.show()
# Simulate listwise deletion
print(propertydata.dropna())
# Handle ST_NUM, NUM_BEDROOMS, NUM_BATH, SQ_FT
# Perform mean imputation and down-cast to int to get rid of values like 2.167 bedrooms
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
propertydata['ST_NUM'] = imp_mean.fit_transform(propertydata['ST_NUM'].values.reshape(-1, 1))
propertydata['ST_NUM'] = propertydata['ST_NUM'].astype(int)
propertydata['NUM_BEDROOMS'] = imp_mean.fit_transform(propertydata['NUM_BEDROOMS'].values.reshape(-1, 1))
propertydata['NUM_BEDROOMS'] = propertydata['NUM_BEDROOMS'].astype(int)
propertydata['NUM_BATH'] = imp_mean.fit_transform(propertydata['NUM_BATH'].values.reshape(-1, 1))
propertydata['NUM_BATH'] = propertydata['NUM_BATH'].astype(int)
propertydata['SQ_FT'] = imp_mean.fit_transform(propertydata['SQ_FT'].values.reshape(-1, 1))
propertydata['SQ_FT'] = propertydata['SQ_FT'].astype(int)
print(propertydata['ST_NUM'])
print(propertydata['ST_NAME'])
print(propertydata['OWN_OCCUPIED'])
print(propertydata['NUM_BEDROOMS'])
print(propertydata['NUM_BATH'])
print(propertydata['SQ_FT'])
# Show missing value matrix
msno.matrix(propertydata)
plt.show()
# Simulate listwise deletion
print(propertydata.dropna())
# Handle OWN_OCCUPIED
# Perform most frequent imputation
imp_most_frequent = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
propertydata['OWN_OCCUPIED'] = imp_most_frequent.fit_transform(propertydata['OWN_OCCUPIED'].values.reshape(-1, 1))
# Show missing value matrix
msno.matrix(propertydata)
plt.show()
review_data()
# preprocess_data()
| 30.632479
| 118
| 0.681083
|
30b2fcda38e3f910b4f7c7212fb480217de14864
| 301
|
py
|
Python
|
pytest/mod.py
|
free31jafar/Zeref
|
9cfe9f2e753e6d30d974ced6177080575635e17c
|
[
"MIT"
] | null | null | null |
pytest/mod.py
|
free31jafar/Zeref
|
9cfe9f2e753e6d30d974ced6177080575635e17c
|
[
"MIT"
] | null | null | null |
pytest/mod.py
|
free31jafar/Zeref
|
9cfe9f2e753e6d30d974ced6177080575635e17c
|
[
"MIT"
] | null | null | null |
import pytest
from redbot.core import modlog
__all__ = ["mod"]
@pytest.fixture
def mod(config, monkeypatch):
from redbot.core import Config
with monkeypatch.context() as m:
m.setattr(Config, "get_conf", lambda *args, **kwargs: config)
modlog._init()
return modlog
| 18.8125
| 69
| 0.671096
|
f5ea565ea94ce8fa326d82a47f95a13bd5351fcc
| 24,397
|
py
|
Python
|
46/swagger_client/api_client.py
|
apitore/apitore-sdk-python
|
c0814c5635ddd09e9a20fcb155b62122bee41d33
|
[
"Apache-2.0"
] | 3
|
2018-08-21T06:14:33.000Z
|
2019-10-18T23:05:50.000Z
|
46/swagger_client/api_client.py
|
apitore/apitore-sdk-python
|
c0814c5635ddd09e9a20fcb155b62122bee41d33
|
[
"Apache-2.0"
] | null | null | null |
46/swagger_client/api_client.py
|
apitore/apitore-sdk-python
|
c0814c5635ddd09e9a20fcb155b62122bee41d33
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
WordNet APIs
You can access ALL WordNet DB.<BR />[Endpoint] https://api.apitore.com/api/46 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import datetime
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from swagger_client.configuration import Configuration
import swagger_client.models
from swagger_client import rest
class ApiClient(object):
"""Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None):
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool = ThreadPool()
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
def __del__(self):
self.pool.close()
self.pool.join()
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.configuration.host + resource_path
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(swagger_client.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
                 response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
        To make an asynchronous request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
        :param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
then the method will return the response directly.
"""
        if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path,
method, path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats,
_preload_content, _request_timeout))
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
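    # Examples of the collection formats handled above (illustrative values):
    #   parameters_to_tuples({'ids': [1, 2, 3]}, {'ids': 'csv'})   -> [('ids', '1,2,3')]
    #   parameters_to_tuples({'ids': [1, 2, 3]}, {'ids': 'multi'}) -> [('ids', 1), ('ids', 2), ('ids', 3)]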
def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return a original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types and not hasattr(klass,
'get_real_child_model'):
return data
kwargs = {}
if klass.swagger_types is not None:
for attr, attr_type in six.iteritems(klass.swagger_types):
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
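# --- Illustrative sketch (not part of the generated client) -------------------
# A standalone demonstration of two conventions used by the helpers above:
# `parameters_to_tuples` joins collection values with a delimiter chosen per
# `collection_formats` ('multi' instead repeats the key), and the header
# selection helpers prefer 'application/json' whenever it is offered. The names
# below are local to this demo and assume nothing beyond the code shown above.
if __name__ == "__main__":  # pragma: no cover
    _delimiters = {"csv": ",", "ssv": " ", "tsv": "\t", "pipes": "|"}
    _values = ["alpha", "beta", "gamma"]
    for _fmt, _delim in _delimiters.items():
        # e.g. csv -> alpha,beta,gamma ; pipes -> alpha|beta|gamma
        print(_fmt, "->", _delim.join(_values))
    # 'multi' expands into repeated (key, value) pairs instead of joining:
    print("multi ->", [("tag", _v) for _v in _values])
    # Header negotiation mirrors select_header_accept / select_header_content_type:
    _accepts = ["application/xml", "application/json"]
    print("Accept ->", "application/json" if "application/json" in _accepts
          else ", ".join(_accepts))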
| 39.223473
| 96
| 0.550519
|
b9a1d22e9b6453c6264fc1b5bf39db08f385a293
| 903
|
py
|
Python
|
tests/test_auth_refresh_request.py
|
ConsenSys/mythx-models
|
e912c2fc6e7d18041310d3b9f0f95085db47ed9b
|
[
"MIT"
] | 2
|
2019-08-26T13:42:28.000Z
|
2019-11-13T15:44:16.000Z
|
tests/test_auth_refresh_request.py
|
ConsenSys/mythx-models
|
e912c2fc6e7d18041310d3b9f0f95085db47ed9b
|
[
"MIT"
] | 22
|
2019-08-26T13:14:55.000Z
|
2021-04-18T14:22:52.000Z
|
tests/test_auth_refresh_request.py
|
ConsenSys/mythx-models
|
e912c2fc6e7d18041310d3b9f0f95085db47ed9b
|
[
"MIT"
] | 6
|
2019-08-29T15:51:38.000Z
|
2021-04-05T11:41:34.000Z
|
from hypothesis import given
from mythx_models.request import AuthRefreshRequest
from .strategies.auth import auth_refresh_request
@given(auth_refresh_request())
def test_serde(response):
obj = AuthRefreshRequest(**response)
assert obj.dict(by_alias=True) == {
"accessToken": response["accessToken"],
"refreshToken": response["refreshToken"],
}
@given(auth_refresh_request())
def test_attributes(request):
parsed = AuthRefreshRequest(**request)
assert parsed.dict(by_alias=True) == {
"accessToken": request["accessToken"],
"refreshToken": request["refreshToken"],
}
assert parsed.headers == {}
assert parsed.payload == {
"jwtTokens": {"access": parsed.access_token, "refresh": parsed.refresh_token}
}
assert parsed.method == "POST"
    assert parsed.endpoint == "v1/auth/refresh"
assert parsed.parameters == {}
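# Illustrative sketch (not part of the original test-suite): constructing an
# AuthRefreshRequest by hand with the same fields the property-based tests
# above exercise. The token strings are made-up placeholders.
def _example_manual_construction():
    req = AuthRefreshRequest(accessToken="<access-jwt>", refreshToken="<refresh-jwt>")
    # Serialization round-trips through the camelCase aliases ...
    assert req.dict(by_alias=True) == {
        "accessToken": "<access-jwt>",
        "refreshToken": "<refresh-jwt>",
    }
    # ... and the request is sent as a POST to v1/auth/refresh.
    assert req.method == "POST"
    assert req.endpoint == "v1/auth/refresh"
    return req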
| 28.21875
| 85
| 0.688815
|
c1f12be3bcd3311db12df082f6cefce22b8d31fe
| 1,684
|
py
|
Python
|
j3a-crypter/src/file_worker.py
|
PraserX/j3a
|
f98f0cf9f940d9ae9af22cdb6b9e8ec4a8f02367
|
[
"MIT"
] | 1
|
2020-04-23T12:09:58.000Z
|
2020-04-23T12:09:58.000Z
|
j3a-crypter/src/file_worker.py
|
PraserX/j3a
|
f98f0cf9f940d9ae9af22cdb6b9e8ec4a8f02367
|
[
"MIT"
] | null | null | null |
j3a-crypter/src/file_worker.py
|
PraserX/j3a
|
f98f0cf9f940d9ae9af22cdb6b9e8ec4a8f02367
|
[
"MIT"
] | null | null | null |
import codecs
import io
import json
import os
import sys
class FileWorker(object):
""" File worker - easy file loading
    There can be a problem with encoding, so we try to open the file both as
    standard UTF-8 and as UTF-8 with a BOM.
"""
def open_file(self, file):
""" Open specified file """
input_file = self.try_open_as_utf8(file)
        if input_file is None:
input_file = self.try_open_as_utf8_bom(file)
return input_file
def open_json_file(self, file):
""" Open specified file and load it as JSON object """
input_json_file = None
        # First, try to open as standard UTF-8 (no BOM)
try:
input_file = self.try_open_as_utf8(file)
input_json_file = json.load(input_file)
except:
input_json_file = None
        # Then try to open as UTF-8 with BOM if the first try fails
        if input_json_file is None:
try:
input_file = self.try_open_as_utf8_bom(file)
input_json_file = json.load(input_file)
except:
input_json_file = None
return input_json_file
def try_open_as_utf8(self, file):
""" Method tries open file in utf-8 encoding """
try:
input_file = codecs.open(file, 'r', 'utf-8')
except:
return None
return input_file
def try_open_as_utf8_bom(self, file):
""" Method tries open file in utf-8 bom encoding """
try:
input_file = codecs.open(file, 'r', 'utf-8-sig')
except:
return None
return input_file
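# Illustrative usage sketch (not part of the original module). The file name is
# a made-up placeholder; the point is that open_json_file() falls back from
# plain UTF-8 to UTF-8 with BOM and returns None if both attempts fail.
if __name__ == "__main__":
    worker = FileWorker()
    config = worker.open_json_file("config.json")  # hypothetical input file
    if config is None:
        print("could not read or parse config.json")
    else:
        print("loaded keys:", list(config.keys()))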
| 26.730159
| 81
| 0.570071
|
948e85e26769003f9af6b012a163f9e1a59d48f5
| 3,402
|
py
|
Python
|
Bioinformatics_k-mer_generator_with_Flask/venv/Lib/site-packages/Bio/Align/Applications/_MSAProbs.py
|
ee2110/Machine_Learning_based_web_applications
|
a414f3906e49003230f5440bb7f035a8d72767a6
|
[
"MIT"
] | 2
|
2019-10-25T18:20:34.000Z
|
2019-10-28T15:26:40.000Z
|
Bioinformatics_k-mer_generator_with_Flask/venv/Lib/site-packages/Bio/Align/Applications/_MSAProbs.py
|
ee2110/Machine_Learning_based_web_applications
|
a414f3906e49003230f5440bb7f035a8d72767a6
|
[
"MIT"
] | 1
|
2020-04-25T20:36:07.000Z
|
2020-04-25T20:36:07.000Z
|
site-packages/Bio/Align/Applications/_MSAProbs.py
|
Wristlebane/Pyto
|
901ac307b68486d8289105c159ca702318bea5b0
|
[
"MIT"
] | 1
|
2019-04-12T20:52:12.000Z
|
2019-04-12T20:52:12.000Z
|
# Copyright 2013 by Christian Brueffer. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Command line wrapper for the multiple sequence alignment program MSAProbs."""
from __future__ import print_function
from Bio.Application import _Argument, _Option, _Switch, AbstractCommandline
class MSAProbsCommandline(AbstractCommandline):
"""Command line wrapper for MSAProbs.
http://msaprobs.sourceforge.net
Notes
-----
Last checked against version: 0.9.7
References
----------
Yongchao Liu, Bertil Schmidt, Douglas L. Maskell: "MSAProbs: multiple
sequence alignment based on pair hidden Markov models and partition
function posterior probabilities". Bioinformatics, 2010, 26(16): 1958 -1964
Examples
--------
>>> from Bio.Align.Applications import MSAProbsCommandline
>>> in_file = "unaligned.fasta"
>>> out_file = "aligned.cla"
>>> cline = MSAProbsCommandline(infile=in_file, outfile=out_file, clustalw=True)
>>> print(cline)
msaprobs -o aligned.cla -clustalw unaligned.fasta
You would typically run the command line with cline() or via
the Python subprocess module, as described in the Biopython tutorial.
"""
def __init__(self, cmd="msaprobs", **kwargs):
"""Initialize the class."""
# order of parameters is the same as in msaprobs -help
self.parameters = [
_Option(["-o", "--outfile", "outfile"],
"specify the output file name (STDOUT by default)",
filename=True,
equate=False),
_Option(["-num_threads", "numthreads"],
"specify the number of threads used, and otherwise detect automatically",
checker_function=lambda x: isinstance(x, int)),
_Switch(["-clustalw", "clustalw"],
"use CLUSTALW output format instead of FASTA format"),
_Option(["-c", "consistency"],
"use 0 <= REPS <= 5 (default: 2) passes of consistency transformation",
checker_function=lambda x: isinstance(x, int) and 0 <= x <= 5),
_Option(["-ir", "--iterative-refinement", "iterative_refinement"],
"use 0 <= REPS <= 1000 (default: 10) passes of iterative-refinement",
checker_function=lambda x: isinstance(x, int) and 0 <= x <= 1000),
_Switch(["-v", "verbose"],
"report progress while aligning (default: off)"),
_Option(["-annot", "annot"],
"write annotation for multiple alignment to FILENAME",
filename=True),
_Switch(["-a", "--alignment-order", "alignment_order"],
"print sequences in alignment order rather than input order (default: off)"),
_Option(["-version", "version"],
"print out version of MSAPROBS"),
_Argument(["infile"],
"Multiple sequence input file",
filename=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
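def _example_build_commandline():
    """Illustrative sketch (not part of the original wrapper).
    Builds, but does not run, an MSAProbs command line; the file names are
    placeholders. Executing it would require the msaprobs binary on PATH,
    e.g. via cline() or the Python subprocess module as noted in the class
    docstring.
    """
    cline = MSAProbsCommandline(infile="unaligned.fasta",
                                outfile="aligned.cla",
                                clustalw=True,
                                numthreads=4)
    return str(cline)  # the assembled command string, ready for subprocess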
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| 41.487805
| 97
| 0.611993
|
bccfb9677d99bf1200ffd241439a9320e9180492
| 6,562
|
py
|
Python
|
tests/test_ppp.py
|
pettni/posipoly
|
a40afd093567f62979bba73eae61a6416009bedc
|
[
"MIT"
] | 1
|
2021-07-09T02:07:44.000Z
|
2021-07-09T02:07:44.000Z
|
tests/test_ppp.py
|
pettni/posipoly
|
a40afd093567f62979bba73eae61a6416009bedc
|
[
"MIT"
] | 1
|
2018-07-24T18:21:44.000Z
|
2018-07-24T18:29:50.000Z
|
tests/test_ppp.py
|
pettni/posipoly
|
a40afd093567f62979bba73eae61a6416009bedc
|
[
"MIT"
] | 1
|
2020-04-28T17:18:59.000Z
|
2020-04-28T17:18:59.000Z
|
import sympy
from sympy.abc import x, y
import numpy as np
from posipoly import *
from posipoly.ppp import *
def test_is_dd():
np.testing.assert_equal(is_dd(np.array([[1, 0],[0, 1]] )), True)
np.testing.assert_equal(is_dd(np.array([[1, 1],[1, 1]] )), True)
np.testing.assert_equal(is_dd(np.array([[1, 1.01],[1.01, 1]] )), False)
np.testing.assert_equal(is_dd(np.array([[1, 1.01,0],[1.01, 1, 0], [0,0,1]] )), False)
np.testing.assert_equal(is_dd(np.array([[1, 0.3,-0.69],[0.3, 1, 0.69], [-0.69,0.69,1]] )), False)
np.testing.assert_equal(is_dd(np.array([[1, 0.3,-0.69],[0.3, 1, 0.69], [-0.69,0.69,1.5]] )), True)
np.testing.assert_equal(is_sdd(np.array([[1, 0],[0, 1]] )), True)
np.testing.assert_equal(is_sdd(np.array([[1, 1],[1, 1]] )), True)
np.testing.assert_equal(is_sdd(np.array([[1, 0.3,-0.69],[0.3, 1, 0.69], [-0.69,0.69,1]] )), False)
np.testing.assert_equal(is_sdd(np.array([[1, 0.3,-0.69],[0.3, 1, 0.69], [-0.69,0.69,1.5]] )), True)
np.testing.assert_equal(is_sdd(np.array([[-1, 0], [0, -1]] )), False)
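def _dd_definition_example():
  # Illustrative helper (not part of the original tests): a matrix is
  # diagonally dominant when every diagonal entry is at least the sum of the
  # absolute off-diagonal entries in its row. [[1, 1.01], [1.01, 1]] from the
  # cases above violates this (1 < 1.01), which is why is_dd returns False.
  A = np.array([[1, 1.01], [1.01, 1]])
  row_ok = [abs(A[i, i]) >= sum(abs(A[i, j]) for j in range(A.shape[0]) if j != i)
            for i in range(A.shape[0])]
  return all(row_ok)  # False, matching is_dd(A) above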
def test_sdd_index1():
x11,x22,x12,y11,y22,y12,z11,z22,z12 = sympy.symbols('x11,x22,x12,y11,y22,y12,z11,z22,z12')
M = [[x11,x22,x12], [y11,y22,y12], [z11,z22,z12]]
tt = [[0 for i in range(3)] for j in range(3)]
for i in range(3):
for j in range(3):
for idx in sdd_index(i,j,3):
tt[i][j] = tt[i][j] + M[idx[0]][idx[1]]
np.testing.assert_equal(tt[0][0]-x11-y11, sympy.numbers.Zero)
np.testing.assert_equal(tt[0][1]-x12, sympy.numbers.Zero)
np.testing.assert_equal(tt[0][2]-y12, sympy.numbers.Zero)
np.testing.assert_equal(tt[1][1]-x22-z11, sympy.numbers.Zero)
np.testing.assert_equal(tt[1][2]-z12, sympy.numbers.Zero)
np.testing.assert_equal(tt[2][2]-z22-y22, sympy.numbers.Zero)
def test_sdd_index2():
x11,x22,x12 = sympy.symbols('x11,x22,x12')
M = [[x11,x22,x12]]
tt = [[0 for i in range(2)] for j in range(2)]
for i in range(2):
for j in range(2):
for idx in sdd_index(i,j,2):
tt[i][j] = tt[i][j] + M[idx[0]][idx[1]]
np.testing.assert_equal(tt[0][0]-x11, sympy.numbers.Zero)
np.testing.assert_equal(tt[0][1]-x12, sympy.numbers.Zero)
np.testing.assert_equal(tt[1][1]-x22, sympy.numbers.Zero)
def test_ppp_0():
# find minimal value x such that
# [1 1; 1 x] is positive semi-definite
c = np.array([0,0,1])
Aeq = sp.coo_matrix(np.array([[1,0,0], [0,1,0]]))
beq = np.array([1, 1])
env = mosek.Env()
task = env.Task(0,0)
# Add free variables and objective
task.appendvars(3)
task.putvarboundslice(0, 3, [mosek.boundkey.fr] * 3, [0.]*3, [0.]*3 )
task.putcslice(0, 3, c)
task.putobjsense(mosek.objsense.minimize)
task.appendcons(2)
task.putaijlist(Aeq.row, Aeq.col, Aeq.data)
task.putconboundslice(0, 2, [mosek.boundkey.fx] * 2, beq, beq)
add_psd_mosek( task, sp.coo_matrix(np.eye(3)), np.zeros(3) )
task.optimize()
solution = [0.] * len(c)
task.getxxslice(mosek.soltype.itr, 0, len(c), solution)
mat = vec_to_mat(solution)
v, _ = np.linalg.eig(mat)
np.testing.assert_almost_equal(min(v), 0)
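def _psd_closed_form_example():
  # Illustrative helper (not part of the original tests): for the problem in
  # test_ppp_0, [[1, 1], [1, x]] is PSD iff its determinant x - 1 >= 0, so the
  # minimal feasible x is 1, where the smallest eigenvalue hits exactly 0.
  v, _ = np.linalg.eig(np.array([[1.0, 1.0], [1.0, 1.0]]))
  return np.isclose(min(v), 0.0)  # True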
def test_ppp_1():
# p(x) = a0 + a1 x + a2 x^2 sos
# p(2) = 1
# max a1
# sos constraint added via variable
prob = PPP()
prob.add_var('s', 1, 2, 'pp')
prob.add_constraint({'s': PTrans.eval(1, 2, [0], [2])}, Polynomial.one(1), 'eq')
prob.set_objective({'s': -PTrans.eval0(1, 1)*PTrans.diff(1,2,0)})
sol, _ = prob.solve('psd')
pol = prob.get_poly('s')
np.testing.assert_almost_equal(pol(2), 1)
np.testing.assert_almost_equal((PTrans.eval0(1, 1)*PTrans.diff(1,2,0)*pol)(1), 0.25)
np.testing.assert_almost_equal([pol[(0,)], pol[(1,)], pol[(2,)]], [0.25, 0.25, 0.0625], decimal=3)
# same problem, sos constraint added via add_constraint
prob = PPP()
prob.add_var('s', 1, 2, 'coef')
prob.add_constraint({'s': PTrans.eval(1, 2, [0], [2])}, Polynomial.one(1), 'eq')
prob.set_objective({'s': -PTrans.eval0(1, 1)*PTrans.diff(1,2,0)})
prob.add_constraint({'s': PTrans.eye(1,2)}, Polynomial.zero(1), 'pp')
sol, _ = prob.solve('psd')
pol = prob.get_poly('s')
np.testing.assert_almost_equal(pol(2), 1)
np.testing.assert_almost_equal((PTrans.eval0(1, 1)*PTrans.diff(1,2,0)*pol)(1), 0.25)
np.testing.assert_almost_equal([pol[(0,)], pol[(1,)], pol[(2,)]], [0.25, 0.25, 0.0625], decimal=3)
def test_ppp1():
tot_deg = 6 # overall degree of problem
sigma_deg = tot_deg - 2 # degree of sigma
n = 2
p = Polynomial.from_sympy(-x**2 - y**2 + x, [x,y])
g = Polynomial.from_sympy(1 - x**2 - y**2, [x,y])
prob = PPP()
prob.add_var('gamma', n, 0, 'coef')
prob.add_var('sigma', n, sigma_deg, 'pp')
prob.add_constraint({'gamma': -PTrans.eye(n, 0, n, tot_deg),
'sigma': -PTrans.mul_pol(n, sigma_deg, g)},
-p, 'pp')
prob.set_objective({'gamma': [-1]})
prob.solve('psd')
opt_gamma = prob.get_poly('gamma')(0,0)
np.testing.assert_almost_equal(opt_gamma, -2.)
prob.solve('sdd')
opt_gamma = prob.get_poly('gamma')(0,0)
np.testing.assert_almost_equal(opt_gamma, -2.)
def test_ppp2():
tot_deg = 6 # overall degree of problem
sigma_deg = tot_deg - 2 # degree of sigma
n = 2
p = Polynomial.from_sympy(x**2 + y**2, [x,y])
g = Polynomial.from_sympy(1 - x**2 - y**2, [x,y])
prob = PPP()
prob.add_var('gamma', n, 0, 'coef')
prob.add_var('sigma', n, sigma_deg, 'pp')
prob.add_constraint({'gamma': -PTrans.eye(n, 0, n, tot_deg),
'sigma': -PTrans.mul_pol(n, sigma_deg, g)},
-p, 'pp')
prob.set_objective({'gamma': [-1]})
prob.solve('psd')
opt_gamma = prob.get_poly('gamma')(0,0)
np.testing.assert_almost_equal(opt_gamma, 0.)
prob.solve('sdd')
opt_gamma = prob.get_poly('gamma')(0,0)
np.testing.assert_almost_equal(opt_gamma, 0.)
def test_ppp3():
tot_deg = 6 # overall degree of problem
sigma_deg = tot_deg - 2 # degree of sigma
n = 2
p = Polynomial.from_sympy(2+(x-0.5)**2 + y**2, [x,y])
g = Polynomial.from_sympy(1 - x**2 - y**2, [x,y])
prob = PPP()
prob.add_var('gamma', n, 0, 'coef')
prob.add_var('sigma', n, sigma_deg, 'pp')
prob.add_constraint({'gamma': -PTrans.eye(n, 0, n, tot_deg),
'sigma': -PTrans.mul_pol(n, sigma_deg, g)},
-p, 'pp')
prob.set_objective({'gamma': [-1]})
prob.solve('psd')
opt_gamma = prob.get_poly('gamma')(0,0)
np.testing.assert_almost_equal(opt_gamma, 2.)
prob.solve('sdd')
opt_gamma = prob.get_poly('gamma')(0,0)
np.testing.assert_almost_equal(opt_gamma, 2.)
| 33.141414
| 101
| 0.611856
|
fb3d428bcf4bf55da0a938ee0b0a35b9e029083d
| 49,758
|
py
|
Python
|
pandas/tests/resample/test_datetime_index.py
|
LauraCollard/pandas
|
b1c3a9031569334cafc4e8d45d35408421f7dea4
|
[
"BSD-3-Clause"
] | 2
|
2019-12-02T11:24:30.000Z
|
2021-02-28T12:13:54.000Z
|
pandas/tests/resample/test_datetime_index.py
|
LauraCollard/pandas
|
b1c3a9031569334cafc4e8d45d35408421f7dea4
|
[
"BSD-3-Clause"
] | 1
|
2019-10-31T08:19:49.000Z
|
2019-10-31T08:19:49.000Z
|
pandas/tests/resample/test_datetime_index.py
|
LauraCollard/pandas
|
b1c3a9031569334cafc4e8d45d35408421f7dea4
|
[
"BSD-3-Clause"
] | 4
|
2019-10-09T07:52:08.000Z
|
2021-07-12T02:37:59.000Z
|
from datetime import datetime, timedelta
from functools import partial
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, period_range
from pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_series_equal,
)
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import BDay, Minute
@pytest.fixture()
def _index_factory():
return date_range
@pytest.fixture
def _index_freq():
return "Min"
@pytest.fixture
def _static_values(index):
return np.random.rand(len(index))
def test_custom_grouper(index):
dti = index
s = Series(np.array([1] * len(dti)), index=dti, dtype="int64")
b = Grouper(freq=Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "ohlc", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
b = Grouper(freq=Minute(5), closed="right", label="right")
g = s.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "ohlc", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
    # GH2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype="float64")
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"closed, expected",
[
(
"right",
lambda s: Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
),
),
(
"left",
lambda s: Series(
[s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range(
"1/1/2000 00:05", periods=3, freq="5min", name="index"
),
),
),
],
)
def test_resample_basic(series, closed, expected):
s = series
expected = expected(s)
result = s.resample("5min", closed=closed, label="right").mean()
assert_series_equal(result, expected)
def test_resample_integerarray():
# GH 25580, resample on IntegerArray
ts = pd.Series(
range(9), index=pd.date_range("1/1/2000", periods=9, freq="T"), dtype="Int64"
)
result = ts.resample("3T").sum()
expected = Series(
[3, 12, 21],
index=pd.date_range("1/1/2000", periods=3, freq="3T"),
dtype="Int64",
)
assert_series_equal(result, expected)
result = ts.resample("3T").mean()
expected = Series(
[1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64"
)
assert_series_equal(result, expected)
def test_resample_basic_grouper(series):
s = series
result = s.resample("5Min").last()
grouper = Grouper(freq=Minute(5), closed="left", label="left")
expected = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"keyword,value",
[("label", "righttt"), ("closed", "righttt"), ("convention", "starttt")],
)
def test_resample_string_kwargs(series, keyword, value):
# see gh-19303
# Check that wrong keyword argument strings raise an error
msg = "Unsupported value {value} for `{keyword}`".format(
value=value, keyword=keyword
)
with pytest.raises(ValueError, match=msg):
series.resample("5min", **({keyword: value}))
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how(series, downsample_method):
if downsample_method == "ohlc":
pytest.skip("covered by test_resample_how_ohlc")
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
expected = s.groupby(grouplist).agg(downsample_method)
expected.index = date_range("1/1/2000", periods=4, freq="5min", name="index")
result = getattr(
s.resample("5min", closed="right", label="right"), downsample_method
)()
assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how_ohlc(series):
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = DataFrame(
s.groupby(grouplist).agg(_ohlc).values.tolist(),
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
columns=["open", "high", "low", "close"],
)
result = s.resample("5min", closed="right", label="right").ohlc()
assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max", "sum", "prod", "mean", "var", "std"])
def test_numpy_compat(func):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range("20130101", periods=5, freq="s"))
r = s.resample("2s")
msg = "numpy operations are not valid with resample"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(func, 1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(axis=1)
def test_resample_how_callables():
# GH#7929
data = np.arange(5, dtype=np.int64)
ind = date_range(start="2014-01-01", periods=len(data), freq="d")
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class FnClass:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(FnClass())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_rounding():
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
df = pd.read_csv(
StringIO(data),
parse_dates={"timestamp": ["date", "time"]},
index_col="timestamp",
)
df.index.name = None
result = df.resample("6s").sum()
expected = DataFrame(
{"value": [4, 9, 4, 2]}, index=date_range("2014-11-08", freq="6s", periods=4)
)
assert_frame_equal(result, expected)
result = df.resample("7s").sum()
expected = DataFrame(
{"value": [4, 10, 4, 1]}, index=date_range("2014-11-08", freq="7s", periods=4)
)
assert_frame_equal(result, expected)
result = df.resample("11s").sum()
expected = DataFrame(
{"value": [11, 8]}, index=date_range("2014-11-08", freq="11s", periods=2)
)
assert_frame_equal(result, expected)
result = df.resample("13s").sum()
expected = DataFrame(
{"value": [13, 6]}, index=date_range("2014-11-08", freq="13s", periods=2)
)
assert_frame_equal(result, expected)
result = df.resample("17s").sum()
expected = DataFrame(
{"value": [16, 3]}, index=date_range("2014-11-08", freq="17s", periods=2)
)
assert_frame_equal(result, expected)
def test_resample_basic_from_daily():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample("w-sun").last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/9/2005"]
assert result.iloc[2] == s.iloc[-1]
result = s.resample("W-MON").last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s["1/3/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-TUE").last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s["1/4/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-WED").last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s["1/5/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-THU").last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s["1/6/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-FRI").last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s["1/7/2005"]
assert result.iloc[1] == s["1/10/2005"]
# to biz day
result = s.resample("B").last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/3/2005"]
assert result.iloc[5] == s["1/9/2005"]
assert result.index.name == "index"
def test_resample_upsampling_picked_but_not_correct():
# Test for issue #3020
dates = date_range("01-Jan-2014", "05-Jan-2014", freq="D")
series = Series(1, index=dates)
result = series.resample("D").mean()
assert result.index[0] == dates[0]
# GH 5955
    # incorrectly deciding to upsample when the axis frequency matches the
# resample frequency
s = Series(
np.arange(1.0, 6), index=[datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
)
expected = Series(
np.arange(1.0, 6), index=date_range("19750101", periods=5, freq="D")
)
result = s.resample("D").count()
assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample("D").sum()
result2 = s.resample("D").mean()
assert_series_equal(result1, expected)
assert_series_equal(result2, expected)
def test_resample_frame_basic():
df = tm.makeTimeDataFrame()
b = Grouper(freq="M")
g = df.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
result = df.resample("A").mean()
assert_series_equal(result["A"], df["A"].resample("A").mean())
result = df.resample("M").mean()
assert_series_equal(result["A"], df["A"].resample("M").mean())
df.resample("M", kind="period").mean()
df.resample("W-WED", kind="period").mean()
@pytest.mark.parametrize(
"loffset", [timedelta(minutes=1), "1min", Minute(1), np.timedelta64(1, "m")]
)
def test_resample_loffset(loffset):
# GH 7687
rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
s = Series(np.random.randn(14), index=rng)
result = s.resample("5min", closed="right", label="right", loffset=loffset).mean()
idx = date_range("1/1/2000", periods=4, freq="5min")
expected = Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1),
)
assert_series_equal(result, expected)
assert result.index.freq == Minute(5)
# from daily
dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample("w-sun").last()
business_day_offset = BDay()
expected = ser.resample("w-sun", loffset=-business_day_offset).last()
assert result.index[0] - business_day_offset == expected.index[0]
def test_resample_loffset_upsample():
# GH 20744
rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
s = Series(np.random.randn(14), index=rng)
result = s.resample(
"5min", closed="right", label="right", loffset=timedelta(minutes=1)
).ffill()
idx = date_range("1/1/2000", periods=4, freq="5min")
expected = Series([s[0], s[5], s[10], s[-1]], index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
def test_resample_loffset_count():
# GH 12725
start_time = "1/1/2000 00:00:00"
rng = date_range(start_time, periods=100, freq="S")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("10S", loffset="1s").count()
expected_index = date_range(start_time, periods=10, freq="10S") + timedelta(
seconds=1
)
expected = Series(10, index=expected_index)
assert_series_equal(result, expected)
# Same issue should apply to .size() since it goes through
# same code path
result = ts.resample("10S", loffset="1s").size()
assert_series_equal(result, expected)
def test_resample_upsample():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample("Min").pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == "index"
def test_resample_how_method():
# GH9915
s = Series(
[11, 22],
index=[
Timestamp("2015-03-31 21:48:52.672000"),
Timestamp("2015-03-31 21:49:52.739000"),
],
)
expected = Series(
[11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=[
Timestamp("2015-03-31 21:48:50"),
Timestamp("2015-03-31 21:49:00"),
Timestamp("2015-03-31 21:49:10"),
Timestamp("2015-03-31 21:49:20"),
Timestamp("2015-03-31 21:49:30"),
Timestamp("2015-03-31 21:49:40"),
Timestamp("2015-03-31 21:49:50"),
],
)
assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point():
# GH#9756
index = date_range(start="20150101", end="20150331", freq="BM")
expected = DataFrame({"A": Series([21, 41, 63], index=index)})
index = date_range(start="20150101", end="20150331", freq="B")
df = DataFrame({"A": Series(range(len(index)), index=index)}, dtype="int64")
result = df.resample("BM").last()
assert_frame_equal(result, expected)
def test_upsample_with_limit():
rng = date_range("1/1/2000", periods=3, freq="5t")
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample("t").ffill(limit=2)
expected = ts.reindex(result.index, method="ffill", limit=2)
assert_series_equal(result, expected)
def test_nearest_upsample_with_limit():
rng = date_range("1/1/2000", periods=3, freq="5t")
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample("t").nearest(limit=2)
expected = ts.reindex(result.index, method="nearest", limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(series):
s = series
grouper = Grouper(freq=Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample("5Min").ohlc()
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs["open"] == s[-6]
assert xs["high"] == s[-6:-1].max()
assert xs["low"] == s[-6:-1].min()
assert xs["close"] == s[-2]
xs = result.iloc[0]
assert xs["open"] == s[0]
assert xs["high"] == s[:5].max()
assert xs["low"] == s[:5].min()
assert xs["close"] == s[4]
def test_resample_ohlc_result():
# GH 12332
index = pd.date_range("1-1-2000", "2-15-2000", freq="h")
index = index.union(pd.date_range("4-15-2000", "5-15-2000", freq="h"))
s = Series(range(len(index)), index=index)
a = s.loc[:"4-15-2000"].resample("30T").ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:"4-14-2000"].resample("30T").ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range("2013-12-30", "2014-01-07")
index = rng.drop(
[
Timestamp("2014-01-01"),
Timestamp("2013-12-31"),
Timestamp("2014-01-04"),
Timestamp("2014-01-05"),
]
)
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample("B").mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq="B"))
assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe():
df = (
DataFrame(
{
"PRICE": {
Timestamp("2011-01-06 10:59:05", tz=None): 24990,
Timestamp("2011-01-06 12:43:33", tz=None): 25499,
Timestamp("2011-01-06 12:54:09", tz=None): 25499,
},
"VOLUME": {
Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
},
}
)
).reindex(["VOLUME", "PRICE"], axis=1)
res = df.resample("H").ohlc()
exp = pd.concat(
[df["VOLUME"].resample("H").ohlc(), df["PRICE"].resample("H").ohlc()],
axis=1,
keys=["VOLUME", "PRICE"],
)
assert_frame_equal(exp, res)
df.columns = [["a", "b"], ["c", "d"]]
res = df.resample("H").ohlc()
exp.columns = pd.MultiIndex.from_tuples(
[
("a", "c", "open"),
("a", "c", "high"),
("a", "c", "low"),
("a", "c", "close"),
("b", "d", "open"),
("b", "d", "high"),
("b", "d", "low"),
("b", "d", "close"),
]
)
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index():
# GH 4812
# dup columns with resample raising
df = DataFrame(
np.random.randn(4, 12),
index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq="M") for i in range(12)],
)
df.iloc[3, :] = np.nan
result = df.resample("Q", axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [Period(year=2000, quarter=i + 1, freq="Q") for i in range(4)]
assert_frame_equal(result, expected)
def test_resample_reresample():
dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample("B", closed="right", label="right").mean()
result = bs.resample("8H").mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(simple_date_range_series):
ts = simple_date_range_series("1/1/1990", "1/1/2000")
result = ts.resample("A-DEC", kind="period").mean()
expected = ts.resample("A-DEC").mean()
expected.index = period_range("1990", "2000", freq="a-dec")
assert_series_equal(result, expected)
result = ts.resample("A-JUN", kind="period").mean()
expected = ts.resample("A-JUN").mean()
expected.index = period_range("1990", "2000", freq="a-jun")
assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
assert_series_equal(result, expected)
def test_ohlc_5min():
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range("1/1/2000 00:00:00", "1/1/2000 5:59:50", freq="10s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", closed="right", label="right").ohlc()
assert (resampled.loc["1/1/2000 00:00"] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc["1/1/2000 00:05"] == exp).all()
exp = _ohlc(ts["1/1/2000 5:55:01":])
assert (resampled.loc["1/1/2000 6:00:00"] == exp).all()
def test_downsample_non_unique():
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample("M").mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique():
# GH #1077
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
ts.asfreq("B")
def test_resample_axis1():
rng = date_range("1/1/2000", "2/29/2000")
df = DataFrame(np.random.randn(3, len(rng)), columns=rng, index=["a", "b", "c"])
result = df.resample("M", axis=1).mean()
expected = df.T.resample("M").mean().T
tm.assert_frame_equal(result, expected)
def test_resample_anchored_ticks():
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
rng = date_range("1/1/2000 04:00:00", periods=86400, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ["t", "5t", "15t", "30t", "4h", "12h"]
for freq in freqs:
result = ts[2:].resample(freq, closed="left", label="left").mean()
expected = ts.resample(freq, closed="left", label="left").mean()
assert_series_equal(result, expected)
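def _anchored_ticks_example():
    # Illustrative helper, not collected as a test: because "4h" divides a day
    # evenly, the bins are anchored at midnight, so observations starting at
    # 05:00 still land in the 04:00-08:00 bin rather than in a bin that starts
    # at the first timestamp. Expected first label: Timestamp("2000-01-01 04:00").
    idx = date_range("1/1/2000 05:00", periods=3, freq="h")
    res = Series([1.0, 2.0, 3.0], index=idx).resample("4h").mean()
    return res.index[0]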
def test_resample_single_group():
mysum = lambda x: x.sum()
rng = date_range("2000-1-1", "2000-2-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
rng = date_range("2000-1-1", "2000-1-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
# GH 3849
s = Series(
[30.1, 31.6],
index=[Timestamp("20070915 15:30:00"), Timestamp("20070915 15:40:00")],
)
expected = Series([0.75], index=[Timestamp("20070915")])
result = s.resample("D").apply(lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base():
rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", base=2).mean()
exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_float_base():
# GH25161
dt = pd.to_datetime(
["2018-11-26 16:17:43.51", "2018-11-26 16:17:44.51", "2018-11-26 16:17:45.51"]
)
s = Series(np.arange(3), index=dt)
base = 17 + 43.51 / 60
result = s.resample("3min", base=base).size()
expected = Series(3, index=pd.DatetimeIndex(["2018-11-26 16:17:43.51"]))
assert_series_equal(result, expected)
def test_resample_daily_anchored():
rng = date_range("1/1/2000 0:00:00", periods=10000, freq="T")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample("D", closed="left", label="left").mean()
expected = ts.resample("D", closed="left", label="left").mean()
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet():
# GH #1259
rng = date_range("1/1/2000", "12/31/2000")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("M", kind="period").mean()
exp_index = period_range("Jan-2000", "Dec-2000", freq="M")
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg():
# aggregate a period resampler with a lambda
s2 = Series(
np.random.randint(0, 5, 50),
index=pd.period_range("2012-01-01", freq="H", periods=50),
dtype="float64",
)
expected = s2.to_timestamp().resample("D").mean().to_period()
result = s2.resample("D").agg(lambda x: x.mean())
assert_series_equal(result, expected)
def test_resample_segfault():
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0),
]
df = DataFrame.from_records(
all_wins_and_wagers, columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
assert_frame_equal(result, expected)
def test_resample_dtype_preservation():
# GH 12202
# validation tests for dtype preservation
df = DataFrame(
{
"date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": Series([5, 6, 7, 8], dtype="int32"),
}
).set_index("date")
result = df.resample("1D").ffill()
assert result.val.dtype == np.int32
result = df.groupby("group").resample("1D").ffill()
assert result.val.dtype == np.int32
def test_resample_dtype_coercion():
pytest.importorskip("scipy.interpolate")
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))
expected = df.astype("float64").resample("H").mean()["a"].interpolate("cubic")
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet():
# #1327
rng = date_range("1/1/2000", freq="B", periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("W").mean()
expected = ts.resample("W-SUN").mean()
assert_series_equal(resampled, expected)
def test_monthly_resample_error():
# #1451
dates = date_range("4/16/2012 20:00", periods=5000, freq="h")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("M")
def test_nanosecond_resample_error():
# GH 12307 - Values falls after last bin when
# Resampling using pd.tseries.offsets.Nano as period
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(start=pd.to_datetime(start), periods=10, freq="100n")
ts = Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg("mean")
exp_indx = pd.date_range(start=pd.to_datetime(exp_start), periods=10, freq="100n")
exp = Series(range(len(exp_indx)), index=exp_indx)
assert_series_equal(result, exp)
def test_resample_anchored_intraday(simple_date_range_series):
# #1471, #1458
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("M").mean()
expected = df.resample("M", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
tm.assert_frame_equal(result, expected)
result = df.resample("M", closed="left").mean()
exp = df.tshift(1, freq="D").resample("M", kind="period").mean()
exp = exp.to_timestamp(how="end")
exp.index = exp.index + Timedelta(1, "ns") - Timedelta(1, "D")
tm.assert_frame_equal(result, exp)
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("Q").mean()
expected = df.resample("Q", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
tm.assert_frame_equal(result, expected)
result = df.resample("Q", closed="left").mean()
expected = df.tshift(1, freq="D").resample("Q", kind="period", closed="left").mean()
expected = expected.to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
tm.assert_frame_equal(result, expected)
ts = simple_date_range_series("2012-04-29 23:00", "2012-04-30 5:00", freq="h")
resampled = ts.resample("M").mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "12/31/2002")
freqs = ["MS", "BMS", "QS-MAR", "AS-DEC", "AS-JUN"]
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday():
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
"2014-10-14 23:06:23.206", periods=3, freq="400L"
) | pd.date_range("2014-10-15 23:00:00", periods=2, freq="2200L")
s = Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample("2200L").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:02.000")
# Ensure right closing works
result = s.resample("2200L", label="right").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:04.200")
def test_corner_cases(simple_period_range_series, simple_date_range_series):
# miscellaneous test coverage
rng = date_range("1/1/2000", periods=12, freq="t")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("5t", closed="right", label="left").mean()
ex_index = date_range("1999-12-31 23:55", periods=4, freq="5t")
tm.assert_index_equal(result.index, ex_index)
len0pts = simple_period_range_series("2007-01", "2010-05", freq="M")[:0]
# it works
result = len0pts.resample("A-DEC").mean()
assert len(result) == 0
# resample to periods
ts = simple_date_range_series("2000-04-28", "2000-04-30 11:00", freq="h")
result = ts.resample("M", kind="period").mean()
assert len(result) == 1
assert result.index[0] == Period("2000-04", freq="M")
def test_anchored_lowercase_buglet():
dates = date_range("4/16/2012 20:00", periods=50000, freq="s")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("d").mean()
def test_upsample_apply_functions():
# #1596
rng = pd.date_range("2012-06-12", periods=4, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("20min").aggregate(["mean", "sum"])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic():
rng = pd.date_range("2012-06-12", periods=200, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample("D").sum()
exp = ts.sort_index().resample("D").sum()
assert_series_equal(result, exp)
def test_resample_median_bug_1688():
for dtype in ["int64", "int32", "float64", "float32"]:
df = DataFrame(
[1, 2],
index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype,
)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "4/1/2000")
result = ts.resample("M").apply(lambda x: x.mean())
exp = ts.resample("M").mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample("M").mean()
foo_exp.name = "foo"
bar_exp = ts.resample("M").std()
bar_exp.name = "bar"
result = ts.resample("M").apply([lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ["foo", "bar"]
tm.assert_series_equal(result["foo"], foo_exp)
tm.assert_series_equal(result["bar"], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample("M").aggregate(
{"foo": lambda x: x.mean(), "bar": lambda x: x.std(ddof=1)}
)
tm.assert_series_equal(result["foo"], foo_exp, check_names=False)
tm.assert_series_equal(result["bar"], bar_exp, check_names=False)
def test_resample_unequal_times():
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({"close": 1}, index=bad_ind)
# it works!
df.resample("AS").sum()
def test_resample_consistency():
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range("2002-02-02", periods=4, freq="30T")
s = Series(np.arange(4.0), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq="10T")
s10 = s.reindex(index=i10, method="bfill")
s10_2 = s.reindex(index=i10, method="bfill", limit=2)
rl = s.reindex_like(s10, method="bfill", limit=2)
r10_2 = s.resample("10Min").bfill(limit=2)
r10 = s.resample("10Min").bfill()
# s10_2, r10, r10_2, rl should all be equal
assert_series_equal(s10_2, r10)
assert_series_equal(s10_2, r10_2)
assert_series_equal(s10_2, rl)
def test_resample_timegrouper():
# GH 7227
dates1 = [
datetime(2014, 10, 1),
datetime(2014, 9, 3),
datetime(2014, 11, 5),
datetime(2014, 9, 5),
datetime(2014, 10, 8),
datetime(2014, 7, 15),
]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index("A").resample("M").count()
exp_idx = pd.DatetimeIndex(
["2014-07-31", "2014-08-31", "2014-09-30", "2014-10-31", "2014-11-30"],
freq="M",
name="A",
)
expected = DataFrame({"B": [1, 0, 2, 2, 1]}, index=exp_idx)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq="M", key="A")).count()
assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
result = df.set_index("A").resample("M").count()
expected = DataFrame(
{"B": [1, 0, 2, 2, 1], "C": [1, 0, 2, 2, 1]},
index=exp_idx,
columns=["B", "C"],
)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq="M", key="A")).count()
assert_frame_equal(result, expected)
def test_resample_nunique():
# GH 12352
df = DataFrame(
{
"ID": {
Timestamp("2015-06-05 00:00:00"): "0010100903",
Timestamp("2015-06-08 00:00:00"): "0010150847",
},
"DATE": {
Timestamp("2015-06-05 00:00:00"): "2015-06-05",
Timestamp("2015-06-08 00:00:00"): "2015-06-08",
},
}
)
r = df.resample("D")
g = df.groupby(pd.Grouper(freq="D"))
expected = df.groupby(pd.Grouper(freq="D")).ID.apply(lambda x: x.nunique())
assert expected.name == "ID"
for t in [r, g]:
        result = t.ID.nunique()
assert_series_equal(result, expected)
result = df.ID.resample("D").nunique()
assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq="D")).nunique()
assert_series_equal(result, expected)
def test_resample_nunique_preserves_column_level_names():
# see gh-23222
df = tm.makeTimeDataFrame(freq="1D").abs()
df.columns = pd.MultiIndex.from_arrays(
[df.columns.tolist()] * 2, names=["lev0", "lev1"]
)
result = df.resample("1h").nunique()
tm.assert_index_equal(df.columns, result.columns)
def test_resample_nunique_with_date_gap():
# GH 13453
index = pd.date_range("1-1-2000", "2-15-2000", freq="h")
index2 = pd.date_range("4-15-2000", "5-15-2000", freq="h")
index3 = index.append(index2)
s = Series(range(len(index3)), index=index3, dtype="int64")
r = s.resample("M")
# Since all elements are unique, these should all be the same
results = [r.count(), r.nunique(), r.agg(Series.nunique), r.agg("nunique")]
assert_series_equal(results[0], results[1])
assert_series_equal(results[0], results[2])
assert_series_equal(results[0], results[3])
@pytest.mark.parametrize("n", [10000, 100000])
@pytest.mark.parametrize("k", [10, 100, 1000])
def test_resample_group_info(n, k):
# GH10914
# use a fixed seed to always have the same uniques
prng = np.random.RandomState(1234)
dr = date_range(start="2015-08-27", periods=n // 10, freq="T")
ts = Series(prng.randint(0, n // k, n).astype("int64"), index=prng.choice(dr, n))
left = ts.resample("30T").nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(), freq="30T")
vals = ts.values
bins = np.searchsorted(ix.values, ts.index, side="right")
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1, minlength=len(ix)).astype("int64", copy=False)
right = Series(arr, index=ix)
assert_series_equal(left, right)
def test_resample_size():
n = 10000
dr = date_range("2015-09-19", periods=n, freq="T")
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample("7T").size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq="7T")
bins = np.searchsorted(ix.values, ts.index.values, side="right")
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype("int64", copy=False)
right = Series(val, index=ix)
assert_series_equal(left, right)
def test_resample_across_dst():
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=["ts"])
dti1 = DatetimeIndex(
pd.to_datetime(df1.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid")
)
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=["ts"])
dti2 = DatetimeIndex(
pd.to_datetime(df2.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid")
)
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule="H").sum()
expected = DataFrame([5, 5], index=dti2)
assert_frame_equal(result, expected)
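def _dst_epoch_example():
    # Illustrative helper, not collected as a test: the two epochs used above
    # both map to wall-clock 02:23 in Europe/Madrid, once before and once after
    # the 2016-10-30 DST change (03:00+02:00 -> 02:00+01:00), which is why the
    # hourly resample produces two distinct 02:00 bins.
    stamps = pd.to_datetime([1477786980, 1477790580], unit="s", utc=True)
    return [str(ts) for ts in stamps.tz_convert("Europe/Madrid")]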
def test_groupby_with_dst_time_change():
# GH 24972
index = pd.DatetimeIndex(
[1478064900001000000, 1480037118776792000], tz="UTC"
).tz_convert("America/Chicago")
df = pd.DataFrame([1, 2], index=index)
result = df.groupby(pd.Grouper(freq="1d")).last()
expected_index_values = pd.date_range(
"2016-11-02", "2016-11-24", freq="d", tz="America/Chicago"
)
index = pd.DatetimeIndex(expected_index_values)
expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
assert_frame_equal(result, expected)
def test_resample_dst_anchor():
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz="US/Eastern")
df = DataFrame([5], index=dti)
assert_frame_equal(
df.resample(rule="D").sum(), DataFrame([5], index=df.index.normalize())
)
df.resample(rule="MS").sum()
assert_frame_equal(
df.resample(rule="MS").sum(),
DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)], tz="US/Eastern")),
)
dti = date_range("2013-09-30", "2013-11-02", freq="30Min", tz="Europe/Paris")
values = range(dti.size)
df = DataFrame({"a": values, "b": values, "c": values}, index=dti, dtype="int64")
how = {"a": "min", "b": "max", "c": "count"}
assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193],
},
index=date_range("9/30/2013", "11/4/2013", freq="W-MON", tz="Europe/Paris"),
),
"W-MON Frequency",
)
assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193],
},
index=date_range(
"9/30/2013", "11/11/2013", freq="2W-MON", tz="Europe/Paris"
),
),
"2W-MON Frequency",
)
assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 48, 1538], "b": [47, 1537, 1586], "c": [48, 1490, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="MS", tz="Europe/Paris"),
),
"MS Frequency",
)
assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 1538], "b": [1537, 1586], "c": [1538, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="2MS", tz="Europe/Paris"),
),
"2MS Frequency",
)
df_daily = df["10/26/2013":"10/29/2013"]
assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})[
["a", "b", "c"]
],
DataFrame(
{
"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48],
},
index=date_range("10/26/2013", "10/29/2013", freq="D", tz="Europe/Paris"),
),
"D Frequency",
)
def test_downsample_across_dst():
# GH 8531
tz = pytz.timezone("Europe/Berlin")
dt = datetime(2014, 10, 26)
dates = date_range(tz.localize(dt), periods=4, freq="2H")
result = Series(5, index=dates).resample("H").mean()
expected = Series(
[5.0, np.nan] * 3 + [5.0],
index=date_range(tz.localize(dt), periods=7, freq="H"),
)
tm.assert_series_equal(result, expected)
def test_downsample_across_dst_weekly():
# GH 9119, GH 21459
df = DataFrame(
index=DatetimeIndex(
["2017-03-25", "2017-03-26", "2017-03-27", "2017-03-28", "2017-03-29"],
tz="Europe/Amsterdam",
),
data=[11, 12, 13, 14, 15],
)
result = df.resample("1W").sum()
expected = DataFrame(
[23, 42],
index=pd.DatetimeIndex(["2017-03-26", "2017-04-02"], tz="Europe/Amsterdam"),
)
tm.assert_frame_equal(result, expected)
idx = pd.date_range("2013-04-01", "2013-05-01", tz="Europe/London", freq="H")
s = Series(index=idx)
result = s.resample("W").mean()
expected = Series(
index=pd.date_range("2013-04-07", freq="W", periods=5, tz="Europe/London")
)
tm.assert_series_equal(result, expected)
def test_resample_with_nat():
# GH 13020
index = DatetimeIndex(
[
pd.NaT,
"1970-01-01 00:00:00",
pd.NaT,
"1970-01-01 00:00:01",
"1970-01-01 00:00:02",
]
)
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(
["1970-01-01 00:00:00", "1970-01-01 00:00:01", "1970-01-01 00:00:02"]
)
frame_1s = DataFrame([3, 7, 11], index=index_1s)
assert_frame_equal(frame.resample("1s").mean(), frame_1s)
index_2s = DatetimeIndex(["1970-01-01 00:00:00", "1970-01-01 00:00:02"])
frame_2s = DataFrame([5, 11], index=index_2s)
assert_frame_equal(frame.resample("2s").mean(), frame_2s)
index_3s = DatetimeIndex(["1970-01-01 00:00:00"])
frame_3s = DataFrame([7], index=index_3s)
assert_frame_equal(frame.resample("3s").mean(), frame_3s)
assert_frame_equal(frame.resample("60s").mean(), frame_3s)
def test_resample_datetime_values():
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({"timestamp": dates}, index=dates)
exp = Series(
[datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range("2016-01-15", periods=3, freq="2D"),
name="timestamp",
)
res = df.resample("2D").first()["timestamp"]
tm.assert_series_equal(res, exp)
res = df["timestamp"].resample("2D").first()
tm.assert_series_equal(res, exp)
def test_resample_apply_with_additional_args(series):
# GH 14615
def f(data, add_arg):
return np.mean(data) * add_arg
multiplier = 10
result = series.resample("D").apply(f, multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing as kwarg
result = series.resample("D").apply(f, add_arg=multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing dataframe
df = pd.DataFrame({"A": 1, "B": 2}, index=pd.date_range("2017", periods=10))
result = df.groupby("A").resample("D").agg(f, multiplier)
expected = df.groupby("A").resample("D").mean().multiply(multiplier)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("k", [1, 2, 3])
@pytest.mark.parametrize(
"n1, freq1, n2, freq2",
[
(30, "S", 0.5, "Min"),
(60, "S", 1, "Min"),
(3600, "S", 1, "H"),
(60, "Min", 1, "H"),
(21600, "S", 0.25, "D"),
(86400, "S", 1, "D"),
(43200, "S", 0.5, "D"),
(1440, "Min", 1, "D"),
(12, "H", 0.5, "D"),
(24, "H", 1, "D"),
],
)
def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
# GH 24127
n1_ = n1 * k
n2_ = n2 * k
s = pd.Series(
0, index=pd.date_range("19910905 13:00", "19911005 07:00", freq=freq1)
)
s = s + range(len(s))
result1 = s.resample(str(n1_) + freq1).mean()
result2 = s.resample(str(n2_) + freq2).mean()
assert_series_equal(result1, result2)
@pytest.mark.parametrize(
"first,last,offset,exp_first,exp_last",
[
("19910905", "19920406", "D", "19910905", "19920407"),
("19910905 00:00", "19920406 06:00", "D", "19910905", "19920407"),
("19910905 06:00", "19920406 06:00", "H", "19910905 06:00", "19920406 07:00"),
("19910906", "19920406", "M", "19910831", "19920430"),
("19910831", "19920430", "M", "19910831", "19920531"),
("1991-08", "1992-04", "M", "19910831", "19920531"),
],
)
def test_get_timestamp_range_edges(first, last, offset, exp_first, exp_last):
first = pd.Period(first)
first = first.to_timestamp(first.freq)
last = pd.Period(last)
last = last.to_timestamp(last.freq)
exp_first = pd.Timestamp(exp_first, freq=offset)
exp_last = pd.Timestamp(exp_last, freq=offset)
offset = pd.tseries.frequencies.to_offset(offset)
result = _get_timestamp_range_edges(first, last, offset)
expected = (exp_first, exp_last)
assert result == expected
| 31.632549
| 88
| 0.60332
|
560fe1546104fe817f956ee5d06f91a70113683a
| 1,950
|
py
|
Python
|
config/settings/local.py
|
roger-link/tacky
|
8c8b37bcf8c9773f9c7d283c9038c962b9923620
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
roger-link/tacky
|
8c8b37bcf8c9773f9c7d283c9038c962b9923620
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
roger-link/tacky
|
8c8b37bcf8c9773f9c7d283c9038c962b9923620
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='jx66uy!6uz@w6lb0tb37&e5=0@73^r=9ugzhx5n4sz&3z^xs^(')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| 30.952381
| 99
| 0.498974
|
0849bbd8fc29f9b6fee5b4e0b854bfbd588a4caa
| 1,750
|
py
|
Python
|
packages/nbextension/nteract_on_jupyter/extension.py
|
lgeiger/nteract
|
fbb426fd2a9fd9def57aca5e89444db86de25562
|
[
"BSD-3-Clause"
] | null | null | null |
packages/nbextension/nteract_on_jupyter/extension.py
|
lgeiger/nteract
|
fbb426fd2a9fd9def57aca5e89444db86de25562
|
[
"BSD-3-Clause"
] | 1
|
2018-10-19T15:07:06.000Z
|
2018-10-19T15:07:06.000Z
|
packages/nbextension/nteract_on_jupyter/extension.py
|
lgeiger/nteract
|
fbb426fd2a9fd9def57aca5e89444db86de25562
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""A tornado based nteract server."""
# Copyright (c) nteract development team.
# Distributed under the terms of the Modified BSD License.
import os
from notebook.utils import url_path_join as ujoin
from os.path import join as pjoin
from jupyter_core.paths import ENV_JUPYTER_PATH, jupyter_config_path
from ._version import __version__
from .config import Config
from .handlers import add_handlers
def get_app_dir(app_dir=None):
"""Get the configured nteract app directory.
"""
app_dir = app_dir or os.environ.get('NTERACT_DIR')
app_dir = app_dir or pjoin(ENV_JUPYTER_PATH[0], 'nteract')
return os.path.realpath(app_dir)
def load_jupyter_server_extension(nbapp):
    """Load the nteract server extension.
"""
# Print messages.
here = os.path.dirname(__file__)
nbapp.log.info('nteract extension loaded from %s' % here)
#app_dir = get_app_dir()
#if hasattr(nbapp, 'app_dir'):
# app_dir = get_app_dir(nbapp.app_dir)
app_dir = here # bundle is part of the python package
web_app = nbapp.web_app
config = Config()
# original
# config.assets_dir = os.path.join(app_dir, 'static')
config.assets_dir = app_dir
config.page_title = 'nteract'
config.page_url = '/nteract'
config.dev_mode = False
# Check for core mode.
core_mode = ''
if hasattr(nbapp, 'core_mode'):
core_mode = nbapp.core_mode
# Check for an app dir that is local.
if app_dir == here or app_dir == os.path.join(here, 'build'):
core_mode = True
config.settings_dir = ''
web_app.settings.setdefault('page_config_data', dict())
web_app.settings['page_config_data']['token'] = nbapp.token
add_handlers(web_app, config)
| 27.34375
| 68
| 0.695429
|
0c48013030433aa41a8d9b86561c5388c0a8b9c9
| 3,743
|
py
|
Python
|
main.py
|
adarshchbs/adda_sketch
|
25f7adf3563d8e1edb8c431fb93876bbed4d4e76
|
[
"MIT"
] | null | null | null |
main.py
|
adarshchbs/adda_sketch
|
25f7adf3563d8e1edb8c431fb93876bbed4d4e76
|
[
"MIT"
] | null | null | null |
main.py
|
adarshchbs/adda_sketch
|
25f7adf3563d8e1edb8c431fb93876bbed4d4e76
|
[
"MIT"
] | null | null | null |
"""Main script for ADDA."""
import os
import numpy as np
import torch
import params
from pretrain import eval_src, train_src
from resnet_18 import ResNetClassifier, ResNetEncoder
from discriminator import Discriminator
from utils import init_model, init_random_seed
from test import eval_tgt
from adopt import train_target
from image_loader import image_loader
from torchvision import models
resnet = models.resnet50(pretrained=True)
resnet1 = models.resnet50(pretrained=True)
encoder = torch.nn.Sequential(*(list(resnet.children())[:-1]))
encoder1 = torch.nn.Sequential(*(list(resnet1.children())[:-1]))
if __name__ == '__main__':
# init random seed
init_random_seed(params.manual_seed)
path_sketchy = '/home/adarsh/project/adda_sketch/dataset/sketches/'
path_quickdraw = '/home/adarsh/project/adda_sketch/dataset/QuickDraw_sketches_final/'
path_class_list = '/home/adarsh/project/adda_sketch/common_class_list.txt'
gpu_name = 'cuda:1'
class_list = np.loadtxt(path_class_list,dtype='str')
# load dataset
source_loader = image_loader(parent_folder_path = path_sketchy,
folder_list= class_list,
split= [0.8,0.2,0] )
# print(source_loader.size_total)
src_data_loader = source_loader.image_gen(split_type='train')
src_data_loader_eval = source_loader.image_gen(split_type='val')
target_loader = image_loader(parent_folder_path = path_quickdraw,
folder_list = class_list,
split = [0.8, 0.005, 0] )
# load models
src_encoder = ResNetEncoder(encoder)
src_encoder.cuda(gpu_name)
src_classifier = ResNetClassifier()
src_classifier.cuda(gpu_name)
tgt_encoder = ResNetEncoder(encoder1)
tgt_encoder.cuda(gpu_name)
critic = Discriminator(input_dim=params.d_input_dims,
hidden_dim=params.d_hidden_dims,
output_dim=params.d_output_dims)
critic.cuda(gpu_name)
if(os.path.exists(params.src_encoder_restore) and
os.path.exists(params.src_classifier_restore)):
src_encoder.load_state_dict(torch.load(params.src_encoder_restore))
src_classifier.load_state_dict(torch.load(params.src_classifier_restore))
else:
src_encoder, src_classifier = train_src( src_encoder,
src_classifier,
source_loader, gpu_flag = True, gpu_name = gpu_name)
# eval source model
# print("=== Evaluating classifier for source domain ===")
# eval_src(src_encoder, src_classifier, source_loader, gpu_flag = True, gpu_name = gpu_name)
# print("=== Evaluating target encoder for source domain ===")
# eval_src(tgt_encoder, src_classifier, source_loader, gpu_flag = True, gpu_name = gpu_name)
# train target encoder by GAN
tgt_encoder.load_state_dict(src_encoder.state_dict())
if(os.path.exists(params.tgt_encoder_restore)):
tgt_encoder.load_state_dict(torch.load(params.tgt_encoder_restore))
else:
tgt_encoder = train_target(src_encoder, tgt_encoder, critic,src_classifier,
source_loader, target_loader, gpu_flag = True, gpu_name = gpu_name)
# eval target encoder on test set of target dataset
print("=== Evaluating classifier for encoded target domain ===")
print(">>> source only <<<")
eval_tgt(src_encoder, src_classifier, target_loader, gpu_flag = True, gpu_name=gpu_name)
print(">>> domain adaption <<<")
eval_tgt(tgt_encoder, src_classifier, target_loader, gpu_flag = True, gpu_name= gpu_name)
| 34.33945
| 100
| 0.6786
|
26c059bde1c2f2f5ba30184429168dae8efe8965
| 1,900
|
py
|
Python
|
setup.py
|
dhalbert/Adafruit_CircuitPython_BLE_iBBQ
|
586b3bbebdb7fe04b5c1ede501a5b546bf80756e
|
[
"MIT"
] | null | null | null |
setup.py
|
dhalbert/Adafruit_CircuitPython_BLE_iBBQ
|
586b3bbebdb7fe04b5c1ede501a5b546bf80756e
|
[
"MIT"
] | null | null | null |
setup.py
|
dhalbert/Adafruit_CircuitPython_BLE_iBBQ
|
586b3bbebdb7fe04b5c1ede501a5b546bf80756e
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="adafruit-circuitpython-ble-ibbq",
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="BLE support for iBBQ thermometers",
long_description=long_description,
long_description_content_type="text/x-rst",
# The project's main homepage.
url="https://github.com/adafruit/Adafruit_CircuitPython_BLE_iBBQ",
# Author details
author="Adafruit Industries",
author_email="circuitpython@adafruit.com",
install_requires=["Adafruit-Blinka", "adafruit-circuitpython-ble"],
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Hardware",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# What does your project relate to?
keywords="adafruit blinka circuitpython micropython ble_ibbq ble ibbq thermometer",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# TODO: IF LIBRARY FILES ARE A PACKAGE FOLDER,
# CHANGE `py_modules=['...']` TO `packages=['...']`
py_modules=["adafruit_ble_ibbq"],
)
| 35.185185
| 87
| 0.689474
|
cf9f86b17d9ee1ac661c5f3a6aa151a31fd168eb
| 2,796
|
py
|
Python
|
server/templates/user/adduser.py
|
chenqianhe/CreditStatisticsSystem
|
882ae725dbb429303e3dce4da703116ae3c954fe
|
[
"MIT"
] | null | null | null |
server/templates/user/adduser.py
|
chenqianhe/CreditStatisticsSystem
|
882ae725dbb429303e3dce4da703116ae3c954fe
|
[
"MIT"
] | null | null | null |
server/templates/user/adduser.py
|
chenqianhe/CreditStatisticsSystem
|
882ae725dbb429303e3dce4da703116ae3c954fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# project: credit-statistics-system
# author: Chen Qianhe
# GitHub: https://github.com/chenqianhe
# datetime: 2021/12/14 13:03
from server.database.link_database import LinkDatabase
import time
from email.mime.text import MIMEText
from email.header import Header
import smtplib
import hashlib
def add_user(name: str,
id: str,
grade: str,
college: str,
major: str,
classtype: str,
mailbox: str,
password: str,
database: LinkDatabase) -> dict:
userdata = {"name": name,
"id": id,
"grade": grade,
"college": college,
"major": major,
"classtype": classtype,
"mailbox": mailbox,
"password": password
}
if database.count_data("USER", "user", {"id": id}) == 0:
database.add_data("USER", "user", userdata)
if database.count_data("USER", "user", {"id": id}):
database.add_data("course", "required", {"id": id, "course": "", "credit": 0.0})
database.add_data("course", "professionalelective", {"id": id, "course": "", "credit": 0.0})
database.add_data("course", "professionalpreparation", {"id": id, "course": "", "credit": 0.0})
database.add_data("course", "publicelective", {"id": id, "course": "", "credit": 0.0})
database.add_data("course", "publicpreparation", {"id": id, "course": "", "credit": 0.0})
database.add_data("course", "outside", {"id": id, "course": "", "credit": 0.0})
return {"state": "OK"}
else:
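            # the state string below is Chinese for "unexpected error: the user was not added"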
return {"state": "异常,未正常添加"}
else:
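        # the state string below is Chinese for "the user already exists"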
return {"state": "用户已存在"}
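# Illustrative call (values and the configured LinkDatabase instance are hypothetical):
# db = LinkDatabase()
# result = add_user("Zhang San", "20210001", "2021", "Engineering", "Computer Science",
#                   "1", "student@example.com", "md5-hashed password", db)
# result is {"state": "OK"} on success.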
def send_email(mailbox: str) -> str:
verification_code = hashlib.md5(str(time.time())[-10:].encode(encoding='UTF-8')).hexdigest()[:5].upper()
    addr = '1278095698@qq.com' # sender mailbox
    password = 'ivhbcvtjnpxjhhgg' # mailbox password (or client authorization code)
    # log in to the mailbox
smtp_server = 'smtp.qq.com'
server = smtplib.SMTP_SSL(smtp_server, 465)
try:
print('开始登录')
        server.login(addr, password) # log in to the mailbox
print('登录成功')
except Exception as e:
print('Error:', e)
print("邮件开始发送")
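    # The message body and subject below are Chinese; roughly: "Your verification code is ...
    # (valid for 3 minutes, please enter it soon)" and "Credit Statistics System verification code".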
msg = MIMEText("您的验证码为:\n" + verification_code + "\n (3分钟内有效,请尽快输入。)", 'plain', 'utf-8')
msg['Subject'] = Header("学分统计系统验证码", 'utf-8')
msg['From'] = Header(addr)
flag = 1
try:
msg['To'] = Header(mailbox, 'utf-8')
        server.sendmail(addr, mailbox, msg.as_string()) # convert msg to a string and send it
print("邮件发送成功")
except Exception as e:
flag = 0
print('Error:', e)
server.quit()
if flag:
return verification_code
else:
return "error"
| 32.137931
| 108
| 0.550787
|
72de1622d5fc406861fb42eb1ea1c397735b0a33
| 29,814
|
py
|
Python
|
bzt/modules/gatling.py
|
Avi-Labs/taurus
|
3aa9bc294778d99be545575467fb5897dc815330
|
[
"Apache-2.0"
] | 1,743
|
2015-03-30T20:56:03.000Z
|
2022-03-31T09:08:37.000Z
|
bzt/modules/gatling.py
|
Avi-Labs/taurus
|
3aa9bc294778d99be545575467fb5897dc815330
|
[
"Apache-2.0"
] | 1,159
|
2015-04-01T08:25:53.000Z
|
2022-03-29T08:15:31.000Z
|
bzt/modules/gatling.py
|
Avi-Labs/taurus
|
3aa9bc294778d99be545575467fb5897dc815330
|
[
"Apache-2.0"
] | 497
|
2015-03-31T21:05:18.000Z
|
2022-03-17T12:45:21.000Z
|
"""
Module holds all stuff regarding Gatling tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import json
import os
import re
import time
from collections import defaultdict
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, Scenario
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import ExecutorWidget
from bzt.requests_model import HTTPRequest, SetVariables, HierarchicRequestParser
from bzt.utils import TclLibrary, EXE_SUFFIX, dehumanize_time, get_full_path, FileReader, RESOURCES_DIR, BetterDict
from bzt.utils import simple_body_dict, CALL_PROBLEMS, numeric_types
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, ensure_is_dict, is_windows
class GatlingScriptBuilder(object):
def __init__(self, load, scenario, parent_logger, class_name, gatling_version=None):
super(GatlingScriptBuilder, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.load = load
self.feeder_names = {}
self.scenario = scenario
self.class_name = class_name
if gatling_version is None:
self.gatling_version = Gatling.VERSION
else:
self.gatling_version = gatling_version
# add prefix 'http://' if user forgot it
@staticmethod
def fixed_addr(addr):
if len(addr) > 0 and not addr.startswith('http'):
return 'http://' + addr
else:
return addr
@staticmethod
def indent(text, level):
return " " * level + text
def _get_http(self):
default_address = self.scenario.get("default-address", "")
http_str = '("%(addr)s")\n' % {'addr': self.fixed_addr(default_address)}
if self.scenario.get("retrieve-resources", False):
regex = self.scenario.get("retrieve-resources-regex")
params = 'BlackList(), WhiteList("""%s""")' % regex if regex else ""
http_str += self.indent(".inferHtmlResources(%s)\n" % params, level=2)
if not self.scenario.get('store-cache', True):
http_str += self.indent('.disableCaching\n', level=2)
scenario_headers = self.scenario.get_headers()
for key in scenario_headers:
http_str += self.indent('.header("%(key)s", "%(val)s")\n' % {'key': key, 'val': scenario_headers[key]},
level=2)
return http_str
def _get_exec(self):
exec_str = ''
for req in self.scenario.get_requests(parser=HierarchicRequestParser):
if isinstance(req, SetVariables):
if len(exec_str) > 0:
exec_str += '.'
exec_str += "exec(\n"
exec_str += self.indent("_", level=2)
for k, v in sorted(req.mapping.items()):
exec_str += '.set("%s", "%s")' % (k.replace("\"", "\\\""), v.replace("\"", "\\\""))
exec_str += "\n" + self.indent(")", level=1)
continue
if not isinstance(req, HTTPRequest):
msg = "Gatling simulation generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
if len(exec_str) > 0:
exec_str += '.'
default_address = self.scenario.get("default-address")
if default_address:
url = req.url
else:
url = self.fixed_addr(req.url)
exec_str += 'exec(\n'
exec_template = self.indent('http("%(req_label)s").%(method)s("%(url)s")\n', level=2)
exec_str += exec_template % {'req_label': req.label, 'method': req.method.lower(), 'url': url}
for key in req.headers:
exec_template = self.indent('.header("%(key)s", "%(val)s")\n', level=3)
exec_str += exec_template % {'key': key, 'val': req.headers[key]}
# todo: join with the same in get_sampler_pair
if isinstance(req.body, (dict, list, numeric_types)):
if req.get_header('content-type') == 'application/json' or isinstance(req.body, numeric_types):
req.body = json.dumps(req.body)
elif not simple_body_dict(req.body):
self.log.debug('Header "Content-Type: application/json" is required for body: "%s"', req.body)
req.body = json.dumps(req.body)
if isinstance(req.body, str):
stmt = '.body(%(method)s("""%(body)s"""))\n' % {'method': 'StringBody', 'body': req.body}
exec_str += self.indent(stmt, level=3)
elif isinstance(req.body, dict):
for key in sorted(req.body.keys()):
stmt = '.formParam("%(key)s", "%(val)s")\n' % {'key': key, 'val': req.body[key]}
exec_str += self.indent(stmt, level=3)
elif req.body is not None:
self.log.warning("Unknown body type: %s", req.body)
exec_str += self.__get_assertions(req.config.get('assert', []))
if not req.priority_option('follow-redirects', default=True):
exec_str += self.indent('.disableFollowRedirect\n', level=3)
exec_str += self.indent(')', level=1)
think_time = int(dehumanize_time(req.get_think_time()))
if think_time:
exec_str += '.pause(%(think_time)s)' % {'think_time': think_time}
return exec_str
@staticmethod
def __get_check_template(assertion):
a_not = assertion.get('not', False)
a_regexp = assertion.get('regexp', False)
a_subject = assertion.get('subject', Scenario.FIELD_BODY)
if a_subject == Scenario.FIELD_RESP_CODE:
if a_not:
res = 'status.not(%(sample)s)'
else:
res = 'status.is(%(sample)s)'
elif a_subject == Scenario.FIELD_HEADERS:
res = ''
else: # FIELD_BODY
if a_regexp:
res = 'regex("""%(sample)s""").'
else:
res = 'substring("""%(sample)s""").'
if a_not:
res += 'notExists'
else:
res += 'exists'
return res
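    # Worked example (illustrative, not from the docs): a body assertion with regexp=True and
    # not=True maps to the template 'regex("""%(sample)s""").notExists'; __get_assertions later
    # fills %(sample)s once per configured sample value.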
def __get_assertions(self, assertions):
if len(assertions) == 0:
return ''
first_check = True
check_result = self.indent('.check(\n', level=3)
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
error_str = 'You must specify "contains" parameter for assertion item'
a_contains = assertion.get('contains', TaurusConfigError(error_str))
check_template = self.__get_check_template(assertion)
if check_template == '': # FIELD_HEADERS
self.log.warning('Sorry, but "headers" subject is not implemented for gatling asserts')
return ''
if not isinstance(a_contains, list):
a_contains = [a_contains]
for sample in a_contains:
if not first_check:
check_result += ',\n'
check_result += self.indent(check_template % {'sample': sample}, level=4)
first_check = False
check_result += '\n' + self.indent(')', level=3) + '\n'
return check_result
def _get_feeder_name(self, source_filename):
base_feeder_name = ".".join(os.path.basename(source_filename).split(".")[:-1])
base_feeder_name = re.sub(r'[^A-Za-z0-9_]', '', base_feeder_name) + "Feed"
index = 0
feeder_name = base_feeder_name
while feeder_name in self.feeder_names and self.feeder_names[feeder_name] != source_filename:
index += 1
feeder_name = base_feeder_name + "_%s" % index
if feeder_name not in self.feeder_names:
self.feeder_names[feeder_name] = source_filename
return feeder_name
def _get_feeders(self):
feeders_def = ""
feeding = ""
for source in self.scenario.get_data_sources():
path = self.scenario.engine.find_file(source["path"])
delimiter = source.get('delimiter', None)
loop_over = source.get("loop", True)
var_name = self._get_feeder_name(path)
params = dict(varname=var_name, filename=path, delimiter=delimiter)
if delimiter is not None:
tpl = """val %(varname)s = separatedValues("%(filename)s", '%(delimiter)s')"""
else:
tpl = 'val %(varname)s = csv("%(filename)s")'
line = self.indent(tpl % params, level=1)
if loop_over:
line += '.circular'
feeders_def += line + '\n'
feeding += "feed(%s)." % var_name
if feeders_def:
feeders_def = '\n' + feeders_def
return feeders_def, feeding
def gen_test_case(self):
template_path = os.path.join(RESOURCES_DIR, "gatling", "v3_script.tpl")
with open(template_path) as template_file:
template_line = template_file.read()
feeders_def, feeding = self._get_feeders()
params = {
'class_name': self.class_name,
'httpConf': self._get_http(),
'_exec': self._get_exec(),
'feeders': feeders_def,
'feeding': feeding,
}
return template_line % params
class GatlingExecutor(ScenarioExecutor):
"""
Gatling executor module
"""
def __init__(self):
super(GatlingExecutor, self).__init__()
self.script = None
self.process = None
self.end_time = None
self.retcode = None
self.simulation_started = False
self.dir_prefix = "gatling-%s" % id(self)
self.tool = None
def get_cp_from_files(self):
jar_files = []
files = self.execution.get('files', [])
for candidate in files:
candidate = self.engine.find_file(candidate)
if os.path.isfile(candidate) and candidate.lower().endswith('.jar'):
jar_files.append(candidate)
elif os.path.isdir(candidate):
for element in os.listdir(candidate):
element = os.path.join(candidate, element)
if os.path.isfile(element) and element.lower().endswith('.jar'):
jar_files.append(element)
return jar_files
def get_additional_classpath(self):
cp = self.get_scenario().get("additional-classpath", [])
cp.extend(self.settings.get("additional-classpath", []))
return cp
def prepare(self):
super(GatlingExecutor, self).prepare()
self.install_required_tools()
scenario = self.get_scenario()
self.env.set({"GATLING_HOME": self.tool.tool_dir})
cpath = self.get_additional_classpath()
self.log.debug("Classpath for Gatling: %s", cpath)
for element in cpath:
self.env.add_path({"JAVA_CLASSPATH": element})
self.env.add_path({"COMPILATION_CLASSPATH": element})
new_name = self.engine.create_artifact('gatling-launcher', EXE_SUFFIX)
self.log.debug("Building Gatling launcher: %s", new_name)
self.tool.build_launcher(new_name)
self.script = self.get_script_path()
if not self.script:
if "requests" in scenario:
self.get_scenario()['simulation'], self.script = self.__generate_script()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Gatling tool (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
self.dir_prefix = self.settings.get("dir-prefix", self.dir_prefix)
self.stdout = open(self.engine.create_artifact("gatling", ".out"), "w")
self.stderr = open(self.engine.create_artifact("gatling", ".err"), "w")
self.reader = DataLogReader(self.engine.artifacts_dir, self.log, self.dir_prefix)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def __generate_script(self):
simulation = "TaurusSimulation_%s" % id(self)
file_name = self.engine.create_artifact(simulation, ".scala")
gen_script = GatlingScriptBuilder(self.get_load(), self.get_scenario(), self.log, simulation, self.tool.version)
with codecs.open(file_name, 'w', encoding='utf-8') as script:
script.write(gen_script.gen_test_case())
return simulation, file_name
def _get_simulation_props(self):
props = {}
if os.path.isfile(self.script):
if self.script.endswith('.jar'):
self.env.add_path({"JAVA_CLASSPATH": self.script})
self.env.add_path({"COMPILATION_CLASSPATH": self.script})
else:
props['gatling.core.directory.simulations'] = get_full_path(self.script, step_up=1)
else:
props['gatling.core.directory.simulations'] = self.script
simulation = self.get_scenario().get("simulation")
if simulation:
props['gatling.core.simulationClass'] = simulation
else:
props['gatling.core.runDescription'] = "Taurus_Test"
return props
def _get_load_props(self):
load = self.get_load()
props = {}
if load.concurrency:
props['concurrency'] = load.concurrency
if load.ramp_up is not None:
props['ramp-up'] = int(load.ramp_up)
if load.hold is not None:
props['hold-for'] = int(load.hold)
if load.iterations:
props['iterations'] = int(load.iterations)
if load.throughput:
if load.duration:
props['throughput'] = load.throughput
else:
self.log.warning("You should set up 'ramp-up' and/or 'hold-for' for usage of 'throughput'")
return props
def _get_scenario_props(self):
props = {}
scenario = self.get_scenario()
timeout = scenario.get('timeout', None)
if timeout is not None:
props['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(timeout) * 1000)
return props
def _set_env(self):
props = BetterDict()
props.merge(self.settings.get('properties'))
props.merge(self.get_scenario().get("properties"))
props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
props['gatling.core.directory.resources'] = self.engine.artifacts_dir
props['gatling.core.directory.results'] = self.engine.artifacts_dir
props.merge(self._get_simulation_props())
props.merge(self._get_load_props())
props.merge(self._get_scenario_props())
for key in sorted(props.keys()):
prop = props[key]
val_tpl = "%s"
if isinstance(prop, str):
if not is_windows(): # extend properties support (contained separators/quotes/etc.) on lin/mac
val_tpl = "%r"
self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + val_tpl) % (key, prop)})
self.env.set({"NO_PAUSE": "TRUE"})
self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})
self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
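    # For illustration (hypothetical values): with concurrency=10 and ramp-up=60 the loop above
    # adds -Dconcurrency=10 and -Dramp-up=60 to JAVA_OPTS, alongside the gatling.core.* settings.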
def startup(self):
self._set_env()
self.process = self._execute([self.tool.tool_path])
def check(self):
"""
Checks if tool is still running. Also checks if resulting logs contains
any data and throws exception otherwise.
:return: bool
:raise TaurusConfigError:
:raise TaurusToolError:
"""
self.retcode = self.process.poll()
# detect interactive mode and raise exception if it found
if not self.simulation_started:
wrong_line = "Choose a simulation number:"
with open(self.stdout.name) as out:
file_header = out.read(1024)
if wrong_line in file_header: # gatling can't select test scenario
scenarios = file_header[file_header.find(wrong_line) + len(wrong_line):].rstrip()
msg = 'Several gatling simulations are found, you must '
msg += 'specify one of them to use in "simulation" option: %s' % scenarios
raise TaurusConfigError(msg)
if 'started...' in file_header:
self.simulation_started = True
if self.retcode is None:
return False
elif self.retcode == 0:
return True
else:
raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode, self.get_error_diagnostics())
def shutdown(self):
"""
If tool is still running - let's stop it.
"""
shutdown_process(self.process, self.log)
if self.start_time:
self.end_time = time.time()
self.log.debug("Gatling worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
"""
Save data log as artifact
"""
if self.reader and self.reader.file and self.reader.file.name:
self.engine.existing_artifact(self.reader.file.name)
super(GatlingExecutor, self).post_process()
def install_required_tools(self):
self.tool = self._get_tool(Gatling, config=self.settings)
java = self._get_tool(JavaVM)
required_tools = [self._get_tool(TclLibrary), java, self.tool]
for tool in required_tools:
if not tool.check_if_installed():
tool.install()
def get_widget(self):
if not self.widget:
simulation = self.get_scenario().get('simulation', None)
if simulation == "TaurusSimulation_%s" % id(self):
simulation = 'generated script'
if simulation is None:
simulation = os.path.basename(self.script)
self.widget = ExecutorWidget(self, 'Gatling: %s' % simulation)
return self.widget
def resource_files(self):
files = []
script = self.get_script_path()
if script:
files.append(script)
else:
for source in self.get_scenario().get_data_sources():
source_path = self.engine.find_file(source["path"])
files.append(source_path)
files.extend(self.get_additional_classpath())
return files
def get_error_diagnostics(self):
diagnostics = []
if self.stdout is not None:
with open(self.stdout.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Gatling STDOUT:\n" + contents)
if self.stderr is not None:
with open(self.stderr.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Gatling STDERR:\n" + contents)
if self.reader and self.reader.file and self.reader.file.name:
with open(self.reader.file.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Simulation log:\n" + contents)
return diagnostics
class DataLogReader(ResultsReader):
""" Class to read KPI from data log """
def __init__(self, basedir, parent_logger, dir_prefix):
super(DataLogReader, self).__init__()
self.concurrency = 0
self.log = parent_logger.getChild(self.__class__.__name__)
self.basedir = basedir
self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log)
self.partial_buffer = ""
self.delimiter = "\t"
self.dir_prefix = dir_prefix
self.guessed_gatling_version = None
self._group_errors = defaultdict(set)
def _extract_log(self, fields):
"""
Extract stats from Gatling format of version 3.1 and after
:param fields:
:return:
"""
# 0 ${RequestRecordHeader.value}
# 1 $scenario
# -|2 $userId, absent in Gatling 3.4+
# 2 ${serializeGroups(groupHierarchy)}
# 3 $label
# 4 $startTimestamp
# 5 $endTimestamp
# 6 $status
# [7] ${serializeMessage(message)}${serializeExtraInfo(extraInfo)}
if fields[0].strip() == "USER":
if self.guessed_gatling_version < "3.4+":
del fields[2] # ignore obsolete $userId
if fields[2].strip() == "START":
self.concurrency += 1
elif fields[2].strip() == "END":
self.concurrency -= 1
elif fields[0].strip() == "GROUP":
del fields[0]
return self.__parse_group(fields)
elif fields[0].strip() == "REQUEST":
del fields[0]
return self.__parse_request(fields)
else:
return None
def __parse_group(self, fields):
latency = 0.0
con_time = 0.0
if len(fields) < 3:
label = ""
t_stamp = int(fields[1]) / 1000.0
r_time = 0
error = fields[0]
r_code = "N/A"
else:
if self.guessed_gatling_version < "3.4+":
del fields[0] # ignore obsolete $userId
label = fields[0]
if ',' in label:
return None # skip nested groups for now
t_stamp = int(fields[2]) / 1000.0
r_time = int(fields[3]) / 1000.0
if label in self._group_errors:
error = ';'.join(self._group_errors.pop(label))
else:
error = None
if fields[4] == 'OK':
r_code = '200'
else:
r_code = self.__rc_from_msg(fields[-1])
return int(t_stamp), label, r_time, con_time, latency, r_code, error
def __parse_request(self, fields):
# see LogFileDataWriter.ResponseMessageSerializer in gatling-core
if self.guessed_gatling_version < "3.4+":
del fields[0] # ignore obsolete $userId
if len(fields) >= 6 and fields[5]:
error = fields[5]
else:
error = None
req_hierarchy = fields[0].split(',')[0]
if req_hierarchy:
if error:
self._group_errors[req_hierarchy].add(error)
return None
label = fields[1]
t_stamp = int(fields[3]) / 1000.0
r_time = (int(fields[3]) - int(fields[2])) / 1000.0
latency = 0.0
con_time = 0.0
if fields[4] == 'OK':
r_code = '200'
else:
r_code = self.__rc_from_msg(fields[-1])
return int(t_stamp), label, r_time, con_time, latency, r_code, error
def __rc_from_msg(self, msg):
_tmp_rc = msg.split("but actually ")[-1] # gatling-core/src/main/scala/io/gatling/core/check/Validator.scala
if _tmp_rc.startswith("unexpectedly "):
_tmp_rc = _tmp_rc[len("unexpectedly "):]
if _tmp_rc.startswith("found "):
_tmp_rc = _tmp_rc[len("found "):]
parts = _tmp_rc.split(' ')
if len(parts) > 1 and parts[1] == 'is':
_tmp_rc = parts[0]
return _tmp_rc if _tmp_rc.isdigit() else 'N/A'
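    # Worked examples (message strings are illustrative, not from a real simulation.log):
    #   __rc_from_msg("status.find.is(200), but actually found 500") -> "500"
    #   __rc_from_msg("regex(...) didn't match anything")            -> "N/A"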
def _guess_gatling_version(self, fields):
if fields and fields[-1].strip() < "3.4":
return "3.3.X"
elif fields[-1].strip() >= "3.4":
return "3.4+"
else:
return ""
def _extract_log_data(self, fields):
if self.guessed_gatling_version is None:
self.guessed_gatling_version = self._guess_gatling_version(fields)
return self._extract_log(fields) if self.guessed_gatling_version else None
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:param last_pass:
"""
lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)
for line in lines:
if not line.endswith("\n"):
self.partial_buffer += line
continue
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
line = line.strip()
fields = line.split(self.delimiter)
data = self._extract_log_data(fields)
if data is None:
continue
t_stamp, label, r_time, con_time, latency, r_code, error = data
bytes_count = None
yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count
def open_fds(self, filename):
"""
open gatling simulation.log
"""
if os.path.isdir(self.basedir):
prog = re.compile("^%s-[0-9]+$" % self.dir_prefix)
for fname in os.listdir(self.basedir):
if prog.match(fname):
filename = os.path.join(self.basedir, fname, "simulation.log")
break
if not filename or not os.path.isfile(filename):
self.log.debug('simulation.log not found')
return
elif os.path.isfile(self.basedir):
filename = self.basedir
else:
self.log.debug('Path not found: %s', self.basedir)
return
if not os.path.getsize(filename):
self.log.debug('simulation.log is empty')
else:
return open(filename, 'rb')
class Gatling(RequiredTool):
"""
Gatling tool
"""
DOWNLOAD_LINK = "https://repo1.maven.org/maven2/io/gatling/highcharts/gatling-charts-highcharts-bundle" \
"/{version}/gatling-charts-highcharts-bundle-{version}-bundle.zip"
VERSION = "3.5.1"
LOCAL_PATH = "~/.bzt/gatling-taurus/{version}/bin/gatling{suffix}"
def __init__(self, config=None, **kwargs):
settings = config or {}
version = settings.get("version", self.VERSION)
def_path = self.LOCAL_PATH.format(version=version, suffix=EXE_SUFFIX)
gatling_path = get_full_path(settings.get("path", def_path))
download_link = settings.get("download-link", self.DOWNLOAD_LINK).format(version=version)
super(Gatling, self).__init__(tool_path=gatling_path, download_link=download_link, version=version, **kwargs)
self.tool_dir = get_full_path(self.tool_path, step_up=2)
def check_if_installed(self):
self.log.debug("Trying Gatling...")
try:
out, err = self.call([self.tool_path, '--help'])
self.log.debug("Gatling check output: %s", out)
except CALL_PROBLEMS as exc:
self.log.info("Gatling check failed: %s", exc)
return False
if err:
self.log.warning("Gatling check stderr: %s", err)
return True
def install(self):
dest = get_full_path(self.tool_path, step_up=2)
self.log.info("Will install %s into %s", self.tool_name, dest)
gatling_dist = self._download(use_link=True)
self.log.info("Unzipping %s", gatling_dist)
unzip(gatling_dist, dest, 'gatling-charts-highcharts-bundle-' + self.version)
os.remove(gatling_dist)
os.chmod(get_full_path(self.tool_path), 0o755)
self.log.info("Installed Gatling successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
def build_launcher(self, new_name): # legacy, for v2 only
modified_lines = []
mod_success = False
with open(self.tool_path) as fds:
for line in fds.readlines():
if is_windows():
if line.startswith('set COMPILER_CLASSPATH='):
mod_success = True
line = line.rstrip() + ';%COMPILATION_CLASSPATH%\n' # add from env
elif line.startswith('set GATLING_CLASSPATH='):
mod_success = True
line = line.rstrip() + ';%JAVA_CLASSPATH%\n' # add from env
else:
if line.startswith('COMPILER_CLASSPATH='):
mod_success = True
line = line.rstrip()[:-1] + '${COMPILATION_CLASSPATH}"\n' # add from env
elif line.startswith('GATLING_CLASSPATH='):
mod_success = True
line = line.rstrip()[:-1] + '${JAVA_CLASSPATH}"\n' # add from env
elif line.startswith('"$JAVA"'):
line = 'eval ' + line
modified_lines.append(line)
if not mod_success:
raise ToolError("Can't modify gatling launcher for jar usage, ability isn't supported")
self.tool_path = new_name
with open(self.tool_path, 'w') as modified:
modified.writelines(modified_lines)
if not is_windows():
os.chmod(self.tool_path, 0o755)
| 37.979618
| 120
| 0.581438
|
91cac733cd43ef59c57f2a1a42d31ebc89e2564e
| 4,262
|
py
|
Python
|
Kanta.py
|
TomasJouhilampi/Painonhallinta
|
dd5ca4931316ba25f9946f8c91dd5697aae1d127
|
[
"CC0-1.0"
] | null | null | null |
Kanta.py
|
TomasJouhilampi/Painonhallinta
|
dd5ca4931316ba25f9946f8c91dd5697aae1d127
|
[
"CC0-1.0"
] | null | null | null |
Kanta.py
|
TomasJouhilampi/Painonhallinta
|
dd5ca4931316ba25f9946f8c91dd5697aae1d127
|
[
"CC0-1.0"
] | null | null | null |
# Database module
# Module and library imports
import sqlite3
from sqlite3.dbapi2 import SQLITE_INSERT
# Create a new database in the project directory
tietokannan_nimi = 'painonhallinta.db'
def luo_tietokanta(tiedosto):
    """Creates the database. Note: the file type should be .db
    Args:
        tiedosto (string): name of the SQLite database file
    """
yhteys = sqlite3.connect(tiedosto)
yhteys.close()
def luo_taulut(tiedosto):
    """Creates the tables needed in the SQLite database
    """
    # Connect to the database; the database is created if it does not exist yet
yhteys = sqlite3.connect(tiedosto)
    # Create the henkilo (person) table
yhteys.execute('''CREATE TABLE henkilo
(henkilo_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
etunimi TEXT NOT NULL,
sukunimi TEXT NOT NULL,
sukupuoli INTEGER NOT NULL,
spaiva DATE NOT NULL);''')
    # Create the mittaus (measurements) table
yhteys.execute('''CREATE TABLE mittaus
(mittaus_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
henkilo_id INTEGER NOT NULL,
pituus REAL NOT NULL,
paino REAL NOT NULL,
FOREIGN KEY (henkilo_id)
REFERENCES henkilo (henkilo_id)
ON DELETE CASCADE);''')
    # Close the database connection after the tables have been created
yhteys.close()
# Create test data
def lisaa_henkilo(tiedosto, etunimi, sukunimi, sukupuoli, spaiva):
    """Adds a new record to the henkilo table of the database given as an argument
    Args:
        tiedosto (string): name of the database file
        etunimi (string): the person's first name
        sukunimi (string): the person's last name
        sukupuoli (int): gender code, 1: male, 0: female
        spaiva (string): ISO-standard date YYYY-MM-DD
    """
    # Build the SQL statement from the argument values
sql_lause = "INSERT INTO henkilo (etunimi, sukunimi, sukupuoli, spaiva) VALUES (" + "'" + etunimi + "', " + "'" + sukunimi + "', " + str(sukupuoli) + ", " + "'" + spaiva + "');"
    # Open a connection to the database
yhteys = sqlite3.connect(tiedosto)
    # Execute the record insertion as an SQL statement
yhteys.execute(sql_lause)
    # Commit the transaction
yhteys.commit()
    # Close the connection
yhteys.close()
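# Note: a parameter-bound variant of the insert above (sketch only, same schema assumed) would
# avoid the manual quoting, e.g.:
# yhteys.execute("INSERT INTO henkilo (etunimi, sukunimi, sukupuoli, spaiva) VALUES (?, ?, ?, ?)",
#                (etunimi, sukunimi, sukupuoli, spaiva))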
# TODO: create a function that wraps single quotes around a string
def sql_string(kentta):
kentta = "'" + kentta +"'"
return kentta
# TODO: create a routine for inserting measurement data into the mittaus table
def lisaa_mittaus(tiedosto, henkilo_id, pituus, paino):
    """Adds the person's measurement data to the mittaus table
    Args:
        henkilo_id (integer): the person's id
        pituus (float): the person's height in centimetres
        paino (float): the person's weight in kilograms
    """
sql_lause = "INSERT INTO mittaus (henkilo_id, pituus, paino) VALUES (" + str(henkilo_id) + "," + str(pituus) + "," + str(paino) + ");"
    # Open a connection to the database
yhteys = sqlite3.connect(tiedosto)
    # Execute the record insertion as an SQL statement
yhteys.execute(sql_lause)
    # Commit the transaction
yhteys.commit()
    # Close the connection
yhteys.close()
# TODO: create a routine for reading data from both tables
def lue_kaikki(tiedosto, taulu):
    """Reads all records from the given table
    Args:
        tiedosto (string): name of the database file
        taulu (string): name of the table
    Returns:
        list: the records of the result set
    """
    yhteys = sqlite3.connect(tiedosto)
    kursori = yhteys.execute("SELECT * FROM " + taulu + ";")
    tietueet = kursori.fetchall()
    yhteys.close()
    return tietueet
# Local testing
if __name__ == "__main__":
luo_tietokanta(tietokannan_nimi)
luo_taulut(tietokannan_nimi)
'''
etunimi = 'Mikko'
sukunimi = 'Viljanen'
sukupuoli = 1
spaiva = '1968-12-03'
sql_lause = "INSERT INTO henkilo (etunimi, sukunimi, sukupuoli, spaiva) VALUES (" + "'" + etunimi + "', " + "'" + sukunimi + "', " + str(sukupuoli) + ", " + "'" + spaiva + "');"
print(sql_lause) '''
lisaa_henkilo(tietokannan_nimi, 'Mikko', 'Viljanen', 1, '1968-12-03')
lisaa_henkilo(tietokannan_nimi, 'Mika', 'Vainio', 1, '1962-06-26')
'''
henkilo_id = 1
pituus = 171
paino = 74
sql_lause = "INSERT INTO mittaus (henkilo_id, pituus, paino) VALUES (" + str(henkilo_id) + "," + str(pituus) + "," + str(paino) + ");"
print(sql_lause) '''
lisaa_mittaus(tietokannan_nimi, 2, 171, 74)
| 32.287879
| 181
| 0.665415
|
97e0a8690f0eef7fcecc3005f2550e2768d4714e
| 1,045
|
py
|
Python
|
frappe/core/doctype/language/language.py
|
sbkolate/jacfrappe
|
40cdb5b5fb0f1ccf802cb43df3cf65b593134fb5
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/language/language.py
|
sbkolate/jacfrappe
|
40cdb5b5fb0f1ccf802cb43df3cf65b593134fb5
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/language/language.py
|
sbkolate/jacfrappe
|
40cdb5b5fb0f1ccf802cb43df3cf65b593134fb5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
class Language(Document):
pass
def export_languages_json():
'''Export list of all languages'''
languages = frappe.db.get_all('Language', fields=['name', 'language_name'])
languages = [{'name': d.language_name, 'code': d.name} for d in languages]
languages.sort(lambda a,b: 1 if a['code'] > b['code'] else -1)
with open(frappe.get_app_path('frappe', 'geo', 'languages.json'), 'w') as f:
f.write(frappe.as_json(languages))
def sync_languages():
'''Sync frappe/geo/languages.json with Language'''
with open(frappe.get_app_path('frappe', 'geo', 'languages.json'), 'r') as f:
data = json.loads(f.read())
for l in data:
if not frappe.db.exists('Language', l['code']):
frappe.get_doc({
'doctype': 'Language',
'language_code': l['code'],
'language_name': l['name']
}).insert()
| 30.735294
| 77
| 0.689952
|
9770616e911b7c9607656a82995664db40988d38
| 3,443
|
py
|
Python
|
mwa_pb/measured_beamformer.py
|
NickSwainston/mwa_pb
|
6656ee2b83c23e6c31ae8dac6f67a46d9b881eae
|
[
"MIT"
] | 2
|
2020-07-20T05:38:00.000Z
|
2020-08-10T01:31:52.000Z
|
mwa_pb/measured_beamformer.py
|
NickSwainston/mwa_pb
|
6656ee2b83c23e6c31ae8dac6f67a46d9b881eae
|
[
"MIT"
] | 15
|
2019-05-01T01:58:06.000Z
|
2022-03-23T15:54:13.000Z
|
mwa_pb/measured_beamformer.py
|
NickSwainston/mwa_pb
|
6656ee2b83c23e6c31ae8dac6f67a46d9b881eae
|
[
"MIT"
] | 7
|
2019-04-16T08:30:40.000Z
|
2022-03-28T13:34:13.000Z
|
import os
import numpy
from scipy import interpolate
from . import config
##################################
def delayset2delaylines(delayset):
"""
Calculate an array of 5 delay line flags based on the delay setting.
NOT VECTOR.
"""
# See if we have a valid delay setting
if delayset < 0 or delayset > 63:
raise ValueError("Invalid Delay Setting %s" % repr(delayset))
# Delay settings with the MSB set are turned off, return None
if delayset > 31:
return None
# Iterate through delaylines
t = delayset
dlines = [False] * 5
for i in range(4, -1, -1):
if t >= 2 ** i:
t = t - 2 ** i
dlines[i] = True
return dlines
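# Worked example (values chosen here for illustration): delayset2delaylines(13) decomposes
# 13 = 8 + 4 + 1 into [True, False, True, True, False], i.e. delay lines 0, 2 and 3 switched in.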
##################################
def get_delay_length(delayset, freq, delayfile=None):
"""
Get a delay length (in seconds) from a delay set
"""
if delayfile is None:
delayfile = config.MEAS_DELAYS
if not os.path.exists(delayfile):
raise ValueError("Delay File %s does not exist" % delayfile)
# Read in the array from the delay file
t = []
f = open(delayfile)
for line in f:
t.append(list(map(float, line.split())))
darr = numpy.array(t)
f_freqs = darr[:, 0] # Array of frequencies in the delay file.
# Columns 1 through 5 in the delay file correspond to those delay lines
delayset = numpy.array(delayset)
outdelay = numpy.zeros(delayset.size, dtype='float64')
for j in range(delayset.size):
# stupid numpy zero-length array type....
if delayset.size == 1:
dlines = delayset2delaylines(delayset)
else:
dlines = delayset2delaylines(delayset[j])
for i in range(5):
# Check if each delay line is on. If it is, interpolate the file
# to find the amount of delay to add
if dlines[i]:
ifunc = interpolate.splrep(f_freqs, darr[:, i + 1], s=0)
outdelay[j] = outdelay[j] + interpolate.splev(freq, ifunc, der=0)
return outdelay
##################################
def get_delay_gains(delayset, freq, delayfile=None):
"""
    Get the delay gains (linear scale) from a delay set
"""
    # use the measured-gain file from config unless an explicit file was passed in
    if delayfile is None:
        gainfile = config.MEAS_GAINS
    else:
        gainfile = delayfile
if not os.path.exists(gainfile):
raise ValueError("Gain File %s does not exist" % gainfile)
# Read in the array from the delay file
t = []
f = open(gainfile)
for line in f:
t.append(list(map(float, line.split())))
garr = numpy.array(t)
f_freqs = garr[:, 0] # Array of frequencies in the delay file.
# Columns 1 through 5 in the delay file correspond to those delay lines
delayset = numpy.array(delayset)
outgain = numpy.zeros(delayset.size, dtype='float64')
for j in range(delayset.size):
# stupid numpy zero-length array type....
if delayset.size == 1:
dlines = delayset2delaylines(delayset)
else:
dlines = delayset2delaylines(delayset[j])
for i in range(5):
# Check if each delay line is on. If it is, interpolate the file
# to find the amount of delay to add
if dlines[i]:
ifunc = interpolate.splrep(f_freqs, garr[:, i + 1], s=0)
outgain[j] = outgain[j] + interpolate.splev(freq, ifunc, der=0)
return (10.0 ** (outgain / 20.0))
| 29.681034
| 81
| 0.584084
|
e6d7556e0f81f73317ad863f5224df2799653de4
| 9,001
|
py
|
Python
|
shopify_yearmakemodel_advanced_search/shopify_yearmakemodel_advanced_search.py
|
chaps/py_shopify_ymm_advance_search
|
f83a3c8d30728d4c281b879cbc458e82771654f7
|
[
"MIT"
] | null | null | null |
shopify_yearmakemodel_advanced_search/shopify_yearmakemodel_advanced_search.py
|
chaps/py_shopify_ymm_advance_search
|
f83a3c8d30728d4c281b879cbc458e82771654f7
|
[
"MIT"
] | null | null | null |
shopify_yearmakemodel_advanced_search/shopify_yearmakemodel_advanced_search.py
|
chaps/py_shopify_ymm_advance_search
|
f83a3c8d30728d4c281b879cbc458e82771654f7
|
[
"MIT"
] | null | null | null |
"""Main module."""
import requests
from enum import Enum
class SearchConditionsEnum(Enum):
EQUALS = "equals"
NOT_EQUALS = "not_equals"
BEGINS_WITH = "begins_with"
ENDS_WITH = "ends_with"
CONTAINS = "contains"
DOES_NOT_CONTAINS = "does_not_contains"
GREATER_THAN = "greater_than"
GREATER_THAN_OR_EQUAL = "greater_than_or_equal"
LESS_THAN = "less_than"
LESS_THAN_OR_EQUAL = "less_than_or_equal"
class Shopify_YMM_AS:
DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36"
ROUTES = {
"GET_YMM": "/ajax/manage_ymm.php",
"POST_YMM": "/ajax/manage_ymm.php",
"ASSIGN_YMM": "/ajax/bulk_ymm.php",
"GET_PROD_YMM": "/ajax/manage_ymm.php"
}
def __init__(
self, domain,
service_domain="https://www.ymmshopify.capacitywebservices.com"
):
self.domain = domain
self.service_domain = service_domain
self.headers = {}
def build_default_headers(self):
headers = self.headers
if not "user-agent" in headers:
headers["user-agent"] = self.DEFAULT_USER_AGENT
return headers
def build_post_fields(self, fields):
""" Given an array of values,
build the array with the data expected by the payload
for each entry.
"""
return [
{
"field_val": value,
"tag_val": value,
"field":"field_{}".format(i),
"tag_field":"field_{}_tag".format(i)
}
for i, value in enumerate(fields, start=1)
]
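    # Illustrative result (hypothetical field values): build_post_fields(["1999", "Ford", "Mustang"])
    # returns [{"field_val": "1999", "tag_val": "1999", "field": "field_1", "tag_field": "field_1_tag"},
    #          ..., {"field_val": "Mustang", "tag_val": "Mustang", "field": "field_3", "tag_field": "field_3_tag"}]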
def get_ymms(self):
""" Performs a GET request to obtain all YMM entries
for the domain object's target domain.
Given the correct parameters,
Returns an HTTP response with the JSON YMM data
example: {"all_count":"3","total":3,"list":[{"id":"1","field_1":"1999","field_2":"Make ","field_3":"Nano"}]}
"""
headers = self.build_default_headers()
response = requests.get(
f"{self.service_domain}{self.ROUTES['GET_YMM']}",
params={
"action": "get",
"domain": self.domain
},
headers=headers
)
return response
def search_ymms(
self,
text_search,
filter_year=True,
filter_make=True,
filter_model=True,
page=1,
search_type=SearchConditionsEnum.EQUALS
):
""" Performs a POST request to obtain all YMM entries
for the domain object's target domain, that match a certain search criteria.
"""
headers = self.build_default_headers()
payload = {}
payload["txt"] = text_search
filter_fields = []
if filter_year:
filter_fields.append("field_1")
if filter_make:
filter_fields.append("field_2")
if filter_model:
filter_fields.append("field_3")
payload["search_cond"] = search_type.value if type(search_type) == SearchConditionsEnum else search_type
payload["page"] = page
payload["filter_fields"] = filter_fields
response = requests.post(
f"{self.service_domain}{self.ROUTES['GET_YMM']}",
params={
"action": "get",
"domain": self.domain
},
json=payload,
headers=headers
)
return response
def search_ymm(self, *args, **kwargs):
"""
        Performs a search_ymms call; returns None if no results were found,
        the id if exactly one result was found, or the list of results
        if many are found.
"""
response_data = self.search_ymms(*args, **kwargs).json()
if response_data["total"] == 1:
return response_data["list"][0]["id"]
if response_data["total"] > 0:
return response_data["list"]
return None
def get_single_ymm(self, ymm_id):
""" Performs a GET request to obtain a single YMM entry
for the domain object's target domain.
Given the correct parameters,
Returns an HTTP response with the JSON YMM data
example: {"id":"18","shop_id":"49639227552",
"product_ids":"6074816233632,6071459348640,6071459610784","field_1":"2020","field_1_tag":"yr_2020",
"field_2":"model2","field_2_tag":"mk_model2","field_3":"make2","field_3_tag":"md_make2"}
"""
headers = self.build_default_headers()
response = requests.get(
f"{self.service_domain}{self.ROUTES['GET_YMM']}",
params={
"action": "edit",
"data_id": ymm_id,
"domain": self.domain
},
headers=headers
)
return response
def add_ymm(self, fields, prod_ids=[]):
"""
Given an array of YMM field values,
and an optional array of product ids.
build the fields expected payload
and perform a POST request to create a new
        YMM entry; if product_ids were given,
the new entry will be related to the given product_ids.
"""
payload = {
"ymm_fields": self.build_post_fields(fields),
"product_ids": prod_ids
}
headers = self.build_default_headers()
return requests.post(
f"{self.service_domain}{self.ROUTES['POST_YMM']}",
json = payload,
params = {
"domain": self.domain,
"action": "save"
},
headers = headers
)
def update_ymm(self, fields, ymm_id, prod_ids=[]):
"""
Given an array of YMM field values,
and an optional array of product ids.
build the fields expected payload
and perform a POST request to update a
        YMM entry; if product_ids were given,
the new entry will be related to the given product_ids.
"""
payload = {
"ymm_fields": self.build_post_fields(fields),
"product_ids": prod_ids
}
headers = self.build_default_headers()
return requests.post(
f"{self.service_domain}{self.ROUTES['POST_YMM']}",
json = payload,
params = {
"domain": self.domain,
"data_id": ymm_id,
"action": "save"
},
headers = headers
)
def assign_prods_ymms(self, prods, ymms):
"""
Given a list of shopify product ids
and a list of YMM ids.
Perform a POST request to create a relation between them.
Returns the response object from the performed request.
"""
payload = {
"product_ids": prods,
"ymm_row_ids": ymms,
}
headers = self.build_default_headers()
return requests.post(
f"{self.service_domain}{self.ROUTES['ASSIGN_YMM']}",
json = payload,
params = {
"domain": self.domain,
"action": "bulk_assign"
},
headers=headers
)
def get_prod_ymms(self, prod_ymms_id):
"""
Given a YMM id, Perform a GET request to
obtain the given YMM id information and all the Product-YMMs relations
Returns the response object from the performed request.
"""
headers = self.build_default_headers()
return requests.get(
f"{self.service_domain}{self.ROUTES['GET_PROD_YMM']}",
params = {
"domain": self.domain,
"action": "edit",
"data_id": prod_ymms_id
},
headers=headers
)
def delete_ymm(self, ymm_id):
"""
Given an array of a single or multiple YMM ids as string,
perform a POST request to delete existent YMM entries.
"""
payload = {
"delete_id": ymm_id
}
headers = self.build_default_headers()
return requests.post(
f"{self.service_domain}{self.ROUTES['POST_YMM']}",
json = payload,
params = {
"domain": self.domain,
"action": "delete"
},
headers = headers
)
    def delete_all_ymms(self):
        """ Performs a GET request to delete all YMM entries
for the domain object's target domain.
Given the correct parameters,
Returns an HTTP response with code 200
"""
headers = self.build_default_headers()
response = requests.get(
f"{self.service_domain}{self.ROUTES['GET_YMM']}",
params={
"action": "delete_all",
"domain": self.domain
},
headers=headers
)
return response
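# Minimal usage sketch (the shop domain is a placeholder, not taken from the source):
# client = Shopify_YMM_AS("example-shop.myshopify.com")
# all_entries = client.get_ymms().json()
# match = client.search_ymm("1999", filter_make=False, filter_model=False,
#                           search_type=SearchConditionsEnum.EQUALS)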
| 32.970696
| 136
| 0.553383
|
ade3c20b4a698e2f676fbe78717055cde6382852
| 1,037
|
py
|
Python
|
esp32/tools/run_initial_lopy_test.py
|
knagymate/pycom-micropython-sigfox-ble
|
03a166e7e5a98eeab273305fb4a22d3f32326b93
|
[
"MIT"
] | 3
|
2020-03-06T16:14:00.000Z
|
2021-09-11T20:50:27.000Z
|
esp32/tools/run_initial_lopy_test.py
|
knagymate/pycom-micropython-sigfox-ble
|
03a166e7e5a98eeab273305fb4a22d3f32326b93
|
[
"MIT"
] | 2
|
2020-03-12T07:59:22.000Z
|
2020-03-12T08:41:29.000Z
|
esp32/tools/run_initial_lopy_test.py
|
knagymate/pycom-micropython-sigfox-ble
|
03a166e7e5a98eeab273305fb4a22d3f32326b93
|
[
"MIT"
] | 1
|
2020-08-27T21:29:52.000Z
|
2020-08-27T21:29:52.000Z
|
#
# Copyright (c) 2020, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
import pyboard
import os
def load_board_script():
with open(os.path.dirname(os.path.realpath(__file__)) + '/lopy_initial_test_board_script.py', 'rb') as input:
remote_code = input.read()
return remote_code
def run_program_script(pyb):
flash_test_code = load_board_script()
pyb.enter_raw_repl_no_reset()
pyb.exec_raw_no_follow(flash_test_code)
def detect_test_status(pyb):
if pyb._wait_for_exact_text("Test "):
status = pyb.read_until("\n")
if "OK" in status:
return True
else:
return False
else:
return False
def test_board(serial_port):
pyb = pyboard.Pyboard(serial_port)
run_program_script(pyb)
return detect_test_status(pyb)
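# Minimal usage sketch (added for illustration, not in the original script).
# The serial port below is an assumption and depends on the host machine.
#
# if __name__ == "__main__":
#     passed = test_board("/dev/ttyUSB0")
#     print("Initial LoPy test passed" if passed else "Initial LoPy test FAILED")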
| 28.027027
| 113
| 0.707811
|
2a728b68cd416b82e16d4e753d1c4edfa426117f
| 2,017
|
py
|
Python
|
english/clustering/Meanshift/Meanshift_predict_cloud.py
|
Lyuyangdaisy/DS_package
|
ca0f220598ee156028646fbefccde08b2ece62ea
|
[
"MIT"
] | 6
|
2021-03-13T10:33:47.000Z
|
2022-01-23T07:22:40.000Z
|
english/clustering/Meanshift/Meanshift_predict_cloud.py
|
Lyuyangdaisy/DS_package
|
ca0f220598ee156028646fbefccde08b2ece62ea
|
[
"MIT"
] | null | null | null |
english/clustering/Meanshift/Meanshift_predict_cloud.py
|
Lyuyangdaisy/DS_package
|
ca0f220598ee156028646fbefccde08b2ece62ea
|
[
"MIT"
] | 2
|
2021-05-05T17:47:34.000Z
|
2021-10-10T16:13:53.000Z
|
from sklearn.model_selection import train_test_split
import sys
import os
import joblib
import warnings
from mlp_training_cloud import get_data,data_process
import pandas as pd
import matplotlib.pyplot as plt
import datetime
warnings.filterwarnings("ignore")
# **********************************************************Parameter Adjustment Part*************************************************************************
CHOICE = 1 # data pre-processing method -- 0: robust standardization, 1: normalization, 2: standardization
# file
#FEATURE_FILE_PATH = "/tmp/feature.xlsx" # file where the features are located
#LABEL_FILE_PATH = "/tmp/label.xlsx" # file where the labels are located
def main():
FEATURE_FILE_PATH = sys.argv[1]
LABEL_FILE_PATH = sys.argv[2]
INPUT_MODEL = sys.argv[3]
OUTPUT_RESULTS = sys.argv[4]
TIMESTAMP = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S").replace("'","")
# base = os.getcwd()
base = INPUT_MODEL
dir = os.path.join(base,"test.pkl")
if not os.path.exists(dir):
raise ValueError("The model file does not exist.")
Mshift = joblib.load(INPUT_MODEL+'test.pkl') # call the pre-trained model
print(Mshift)
# get data
print("Read file.........")
print("Feature file:{}".format(FEATURE_FILE_PATH))
if not os.path.exists(FEATURE_FILE_PATH):
raise ValueError("The feature file does not exist.")
X = pd.read_excel(FEATURE_FILE_PATH).fillna(0)
for col in X.columns:
X[col] = X[col].apply(lambda x:str(x).replace(" ",""))
if X.isnull().any().any():
raise ValueError("There is missing data in the feature file.")
# data pre-process
X_ = data_process(X,CHOICE)
# data prediction and results visualization
y_pred = Mshift.predict(X_)
y_pred = pd.DataFrame(y_pred,columns=['predict'])
df = pd.concat([X,y_pred],axis=1)
df.to_csv(OUTPUT_RESULTS+"{}_results.csv".format(TIMESTAMP),index=None)
if __name__ == '__main__':
main()
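# Example invocation (illustrative only; the paths are assumptions). The script
# reads its four paths from sys.argv, and because INPUT_MODEL and OUTPUT_RESULTS
# are concatenated with file names directly, the directory arguments need a
# trailing slash:
#
#     python Meanshift_predict_cloud.py /tmp/feature.xlsx /tmp/label.xlsx /tmp/model/ /tmp/results/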
| 32.015873
| 158
| 0.639068
|
2afb1e5a36b9e5908b3b07a9ebb1817345fe5bb6
| 310
|
py
|
Python
|
practical/core/views.py
|
Ekhel/django-practical
|
ef92d09e841cdef2c9fc1dd935f02a16ed67a60b
|
[
"MIT"
] | null | null | null |
practical/core/views.py
|
Ekhel/django-practical
|
ef92d09e841cdef2c9fc1dd935f02a16ed67a60b
|
[
"MIT"
] | null | null | null |
practical/core/views.py
|
Ekhel/django-practical
|
ef92d09e841cdef2c9fc1dd935f02a16ed67a60b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.contrib.auth import (login as auth_login, authenticate)
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
def index(request):
return render(request,'apl/home.html')
| 31
| 68
| 0.822581
|
a47aba75e5ffb691a45490d9b080292b83d26fce
| 32,205
|
py
|
Python
|
daemon/tests/test_gui.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/tests/test_gui.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/tests/test_gui.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Tests for testing tlv message handling.
"""
import os
import time
import mock
import netaddr
import pytest
from mock import MagicMock
from core.api.tlv import coreapi
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emulator.enumerations import (
ConfigFlags,
ConfigTlvs,
EventTlvs,
EventTypes,
ExecuteTlvs,
FileTlvs,
LinkTlvs,
MessageFlags,
NodeTlvs,
NodeTypes,
RegisterTlvs,
SessionTlvs,
)
from core.errors import CoreError
from core.location.mobility import BasicRangeModel
def dict_to_str(values):
return "|".join(f"{x}={values[x]}" for x in values)
class TestGui:
@pytest.mark.parametrize(
"node_type, model",
[
(NodeTypes.DEFAULT, "PC"),
(NodeTypes.EMANE, None),
(NodeTypes.HUB, None),
(NodeTypes.SWITCH, None),
(NodeTypes.WIRELESS_LAN, None),
(NodeTypes.TUNNEL, None),
],
)
def test_node_add(self, coretlv, node_type, model):
node_id = 1
message = coreapi.CoreNodeMessage.create(
MessageFlags.ADD.value,
[
(NodeTlvs.NUMBER, node_id),
(NodeTlvs.TYPE, node_type.value),
(NodeTlvs.NAME, "n1"),
(NodeTlvs.X_POSITION, 0),
(NodeTlvs.Y_POSITION, 0),
(NodeTlvs.MODEL, model),
],
)
coretlv.handle_message(message)
assert coretlv.session.get_node(node_id) is not None
def test_node_update(self, coretlv):
node_id = 1
coretlv.session.add_node(_id=node_id)
x = 50
y = 100
message = coreapi.CoreNodeMessage.create(
0,
[
(NodeTlvs.NUMBER, node_id),
(NodeTlvs.X_POSITION, x),
(NodeTlvs.Y_POSITION, y),
],
)
coretlv.handle_message(message)
node = coretlv.session.get_node(node_id)
assert node is not None
assert node.position.x == x
assert node.position.y == y
def test_node_delete(self, coretlv):
node_id = 1
coretlv.session.add_node(_id=node_id)
message = coreapi.CoreNodeMessage.create(
MessageFlags.DELETE.value, [(NodeTlvs.NUMBER, node_id)]
)
coretlv.handle_message(message)
with pytest.raises(CoreError):
coretlv.session.get_node(node_id)
def test_link_add_node_to_net(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
switch = 2
coretlv.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, switch),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE1_IP4, interface_one),
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 1
def test_link_add_net_to_node(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
switch = 2
coretlv.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, switch),
(LinkTlvs.N2_NUMBER, node_one),
(LinkTlvs.INTERFACE2_NUMBER, 0),
(LinkTlvs.INTERFACE2_IP4, interface_one),
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 1
def test_link_add_node_to_node(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
node_two = 2
coretlv.session.add_node(_id=node_two)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
interface_two = str(ip_prefix[node_two])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, node_two),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE1_IP4, interface_one),
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
(LinkTlvs.INTERFACE2_NUMBER, 0),
(LinkTlvs.INTERFACE2_IP4, interface_two),
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
all_links = []
for node_id in coretlv.session.nodes:
node = coretlv.session.nodes[node_id]
all_links += node.all_link_data(0)
assert len(all_links) == 1
def test_link_update(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
switch = 2
coretlv.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, switch),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE1_IP4, interface_one),
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 1
link = all_links[0]
assert link.bandwidth is None
bandwidth = 50000
message = coreapi.CoreLinkMessage.create(
0,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, switch),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.BANDWIDTH, bandwidth),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 1
link = all_links[0]
assert link.bandwidth == bandwidth
def test_link_delete_node_to_node(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
node_two = 2
coretlv.session.add_node(_id=node_two)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
interface_two = str(ip_prefix[node_two])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, node_two),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE1_IP4, interface_one),
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
(LinkTlvs.INTERFACE2_IP4, interface_two),
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
all_links = []
for node_id in coretlv.session.nodes:
node = coretlv.session.nodes[node_id]
all_links += node.all_link_data(0)
assert len(all_links) == 1
message = coreapi.CoreLinkMessage.create(
MessageFlags.DELETE.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, node_two),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE2_NUMBER, 0),
],
)
coretlv.handle_message(message)
all_links = []
for node_id in coretlv.session.nodes:
node = coretlv.session.nodes[node_id]
all_links += node.all_link_data(0)
assert len(all_links) == 0
def test_link_delete_node_to_net(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
switch = 2
coretlv.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, switch),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE1_IP4, interface_one),
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 1
message = coreapi.CoreLinkMessage.create(
MessageFlags.DELETE.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, switch),
(LinkTlvs.INTERFACE1_NUMBER, 0),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 0
def test_link_delete_net_to_node(self, coretlv):
node_one = 1
coretlv.session.add_node(_id=node_one)
switch = 2
coretlv.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
ip_prefix = netaddr.IPNetwork("10.0.0.0/24")
interface_one = str(ip_prefix[node_one])
message = coreapi.CoreLinkMessage.create(
MessageFlags.ADD.value,
[
(LinkTlvs.N1_NUMBER, node_one),
(LinkTlvs.N2_NUMBER, switch),
(LinkTlvs.INTERFACE1_NUMBER, 0),
(LinkTlvs.INTERFACE1_IP4, interface_one),
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 1
message = coreapi.CoreLinkMessage.create(
MessageFlags.DELETE.value,
[
(LinkTlvs.N1_NUMBER, switch),
(LinkTlvs.N2_NUMBER, node_one),
(LinkTlvs.INTERFACE2_NUMBER, 0),
],
)
coretlv.handle_message(message)
switch_node = coretlv.session.get_node(switch)
all_links = switch_node.all_link_data(0)
assert len(all_links) == 0
def test_session_update(self, coretlv):
session_id = coretlv.session.id
name = "test"
message = coreapi.CoreSessionMessage.create(
0, [(SessionTlvs.NUMBER, str(session_id)), (SessionTlvs.NAME, name)]
)
coretlv.handle_message(message)
assert coretlv.session.name == name
def test_session_query(self, coretlv):
coretlv.dispatch_replies = mock.MagicMock()
message = coreapi.CoreSessionMessage.create(MessageFlags.STRING.value, [])
coretlv.handle_message(message)
args, _ = coretlv.dispatch_replies.call_args
replies = args[0]
assert len(replies) == 1
def test_session_join(self, coretlv):
coretlv.dispatch_replies = mock.MagicMock()
session_id = coretlv.session.id
message = coreapi.CoreSessionMessage.create(
MessageFlags.ADD.value, [(SessionTlvs.NUMBER, str(session_id))]
)
coretlv.handle_message(message)
assert coretlv.session.id == session_id
def test_session_delete(self, coretlv):
assert len(coretlv.coreemu.sessions) == 1
session_id = coretlv.session.id
message = coreapi.CoreSessionMessage.create(
MessageFlags.DELETE.value, [(SessionTlvs.NUMBER, str(session_id))]
)
coretlv.handle_message(message)
assert len(coretlv.coreemu.sessions) == 0
def test_file_hook_add(self, coretlv):
state = EventTypes.DATACOLLECT_STATE.value
assert coretlv.session._hooks.get(state) is None
file_name = "test.sh"
file_data = "echo hello"
message = coreapi.CoreFileMessage.create(
MessageFlags.ADD.value,
[
(FileTlvs.TYPE, f"hook:{state}"),
(FileTlvs.NAME, file_name),
(FileTlvs.DATA, file_data),
],
)
coretlv.handle_message(message)
hooks = coretlv.session._hooks.get(state)
assert len(hooks) == 1
name, data = hooks[0]
assert file_name == name
assert file_data == data
def test_file_service_file_set(self, coretlv):
node = coretlv.session.add_node()
service = "DefaultRoute"
file_name = "defaultroute.sh"
file_data = "echo hello"
message = coreapi.CoreFileMessage.create(
MessageFlags.ADD.value,
[
(FileTlvs.NODE, node.id),
(FileTlvs.TYPE, f"service:{service}"),
(FileTlvs.NAME, file_name),
(FileTlvs.DATA, file_data),
],
)
coretlv.handle_message(message)
service_file = coretlv.session.services.get_service_file(
node, service, file_name
)
assert file_data == service_file.data
def test_file_node_file_copy(self, request, coretlv):
file_name = "/var/log/test/node.log"
node = coretlv.session.add_node()
node.makenodedir()
file_data = "echo hello"
message = coreapi.CoreFileMessage.create(
MessageFlags.ADD.value,
[
(FileTlvs.NODE, node.id),
(FileTlvs.NAME, file_name),
(FileTlvs.DATA, file_data),
],
)
coretlv.handle_message(message)
if not request.config.getoption("mock"):
directory, basename = os.path.split(file_name)
created_directory = directory[1:].replace("/", ".")
create_path = os.path.join(node.nodedir, created_directory, basename)
assert os.path.exists(create_path)
def test_exec_node_tty(self, coretlv):
coretlv.dispatch_replies = mock.MagicMock()
node = coretlv.session.add_node()
message = coreapi.CoreExecMessage.create(
MessageFlags.TTY.value,
[
(ExecuteTlvs.NODE, node.id),
(ExecuteTlvs.NUMBER, 1),
(ExecuteTlvs.COMMAND, "bash"),
],
)
coretlv.handle_message(message)
args, _ = coretlv.dispatch_replies.call_args
replies = args[0]
assert len(replies) == 1
def test_exec_local_command(self, request, coretlv):
if request.config.getoption("mock"):
pytest.skip("mocking calls")
coretlv.dispatch_replies = mock.MagicMock()
node = coretlv.session.add_node()
cmd = "echo hello"
message = coreapi.CoreExecMessage.create(
MessageFlags.TEXT.value | MessageFlags.LOCAL.value,
[
(ExecuteTlvs.NODE, node.id),
(ExecuteTlvs.NUMBER, 1),
(ExecuteTlvs.COMMAND, cmd),
],
)
coretlv.handle_message(message)
args, _ = coretlv.dispatch_replies.call_args
replies = args[0]
assert len(replies) == 1
def test_exec_node_command(self, coretlv):
coretlv.dispatch_replies = mock.MagicMock()
node = coretlv.session.add_node()
cmd = "echo hello"
message = coreapi.CoreExecMessage.create(
MessageFlags.TEXT.value,
[
(ExecuteTlvs.NODE, node.id),
(ExecuteTlvs.NUMBER, 1),
(ExecuteTlvs.COMMAND, cmd),
],
)
node.cmd = MagicMock(return_value="hello")
coretlv.handle_message(message)
node.cmd.assert_called_with(cmd)
@pytest.mark.parametrize(
"state",
[
EventTypes.SHUTDOWN_STATE,
EventTypes.RUNTIME_STATE,
EventTypes.DATACOLLECT_STATE,
EventTypes.CONFIGURATION_STATE,
EventTypes.DEFINITION_STATE,
],
)
def test_event_state(self, coretlv, state):
message = coreapi.CoreEventMessage.create(0, [(EventTlvs.TYPE, state.value)])
coretlv.handle_message(message)
assert coretlv.session.state == state.value
def test_event_schedule(self, coretlv):
coretlv.session.add_event = mock.MagicMock()
node = coretlv.session.add_node()
message = coreapi.CoreEventMessage.create(
MessageFlags.ADD.value,
[
(EventTlvs.TYPE, EventTypes.SCHEDULED.value),
(EventTlvs.TIME, str(time.monotonic() + 100)),
(EventTlvs.NODE, node.id),
(EventTlvs.NAME, "event"),
(EventTlvs.DATA, "data"),
],
)
coretlv.handle_message(message)
coretlv.session.add_event.assert_called_once()
def test_event_save_xml(self, coretlv, tmpdir):
xml_file = tmpdir.join("coretlv.session.xml")
file_path = xml_file.strpath
coretlv.session.add_node()
message = coreapi.CoreEventMessage.create(
0,
[(EventTlvs.TYPE, EventTypes.FILE_SAVE.value), (EventTlvs.NAME, file_path)],
)
coretlv.handle_message(message)
assert os.path.exists(file_path)
def test_event_open_xml(self, coretlv, tmpdir):
xml_file = tmpdir.join("coretlv.session.xml")
file_path = xml_file.strpath
node = coretlv.session.add_node()
coretlv.session.save_xml(file_path)
coretlv.session.delete_node(node.id)
message = coreapi.CoreEventMessage.create(
0,
[(EventTlvs.TYPE, EventTypes.FILE_OPEN.value), (EventTlvs.NAME, file_path)],
)
coretlv.handle_message(message)
assert coretlv.session.get_node(node.id)
@pytest.mark.parametrize(
"state",
[
EventTypes.START,
EventTypes.STOP,
EventTypes.RESTART,
EventTypes.PAUSE,
EventTypes.RECONFIGURE,
],
)
def test_event_service(self, coretlv, state):
coretlv.session.broadcast_event = mock.MagicMock()
node = coretlv.session.add_node()
message = coreapi.CoreEventMessage.create(
0,
[
(EventTlvs.TYPE, state.value),
(EventTlvs.NODE, node.id),
(EventTlvs.NAME, "service:DefaultRoute"),
],
)
coretlv.handle_message(message)
coretlv.session.broadcast_event.assert_called_once()
@pytest.mark.parametrize(
"state",
[
EventTypes.START,
EventTypes.STOP,
EventTypes.RESTART,
EventTypes.PAUSE,
EventTypes.RECONFIGURE,
],
)
def test_event_mobility(self, coretlv, state):
message = coreapi.CoreEventMessage.create(
0, [(EventTlvs.TYPE, state.value), (EventTlvs.NAME, "mobility:ns2script")]
)
coretlv.handle_message(message)
def test_register_gui(self, coretlv):
message = coreapi.CoreRegMessage.create(0, [(RegisterTlvs.GUI, "gui")])
coretlv.handle_message(message)
def test_register_xml(self, coretlv, tmpdir):
xml_file = tmpdir.join("coretlv.session.xml")
file_path = xml_file.strpath
node = coretlv.session.add_node()
coretlv.session.save_xml(file_path)
coretlv.session.delete_node(node.id)
message = coreapi.CoreRegMessage.create(
0, [(RegisterTlvs.EXECUTE_SERVER, file_path)]
)
coretlv.session.instantiate()
coretlv.handle_message(message)
assert coretlv.coreemu.sessions[1].get_node(node.id)
def test_register_python(self, coretlv, tmpdir):
xml_file = tmpdir.join("test.py")
file_path = xml_file.strpath
with open(file_path, "w") as f:
f.write("coreemu = globals()['coreemu']\n")
f.write(f"session = coreemu.sessions[{coretlv.session.id}]\n")
f.write("session.add_node()\n")
message = coreapi.CoreRegMessage.create(
0, [(RegisterTlvs.EXECUTE_SERVER, file_path)]
)
coretlv.session.instantiate()
coretlv.handle_message(message)
assert len(coretlv.session.nodes) == 1
def test_config_all(self, coretlv):
message = coreapi.CoreConfMessage.create(
MessageFlags.ADD.value,
[(ConfigTlvs.OBJECT, "all"), (ConfigTlvs.TYPE, ConfigFlags.RESET.value)],
)
coretlv.session.location.refxyz = (10, 10, 10)
coretlv.handle_message(message)
assert coretlv.session.location.refxyz == (0, 0, 0)
def test_config_options_request(self, coretlv):
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "session"),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_options_update(self, coretlv):
test_key = "test"
test_value = "test"
values = {test_key: test_value}
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "session"),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, dict_to_str(values)),
],
)
coretlv.handle_message(message)
assert coretlv.session.options.get_config(test_key) == test_value
def test_config_location_reset(self, coretlv):
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "location"),
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
],
)
coretlv.session.location.refxyz = (10, 10, 10)
coretlv.handle_message(message)
assert coretlv.session.location.refxyz == (0, 0, 0)
def test_config_location_update(self, coretlv):
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "location"),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, "10|10|70|50|0|0.5"),
],
)
coretlv.handle_message(message)
assert coretlv.session.location.refxyz == (10, 10, 0.0)
assert coretlv.session.location.refgeo == (70, 50, 0)
assert coretlv.session.location.refscale == 0.5
def test_config_metadata_request(self, coretlv):
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "metadata"),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_metadata_update(self, coretlv):
test_key = "test"
test_value = "test"
values = {test_key: test_value}
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "metadata"),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, dict_to_str(values)),
],
)
coretlv.handle_message(message)
assert coretlv.session.metadata[test_key] == test_value
def test_config_broker_request(self, coretlv):
server = "test"
host = "10.0.0.1"
port = 50000
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "broker"),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, f"{server}:{host}:{port}"),
],
)
coretlv.session.distributed.add_server = mock.MagicMock()
coretlv.handle_message(message)
coretlv.session.distributed.add_server.assert_called_once_with(server, host)
def test_config_services_request_all(self, coretlv):
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "services"),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_services_request_specific(self, coretlv):
node = coretlv.session.add_node()
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, node.id),
(ConfigTlvs.OBJECT, "services"),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
(ConfigTlvs.OPAQUE, "service:DefaultRoute"),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_services_request_specific_file(self, coretlv):
node = coretlv.session.add_node()
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, node.id),
(ConfigTlvs.OBJECT, "services"),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
(ConfigTlvs.OPAQUE, "service:DefaultRoute:defaultroute.sh"),
],
)
coretlv.session.broadcast_file = mock.MagicMock()
coretlv.handle_message(message)
coretlv.session.broadcast_file.assert_called_once()
def test_config_services_reset(self, coretlv):
node = coretlv.session.add_node()
service = "DefaultRoute"
coretlv.session.services.set_service(node.id, service)
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "services"),
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
],
)
assert coretlv.session.services.get_service(node.id, service) is not None
coretlv.handle_message(message)
assert coretlv.session.services.get_service(node.id, service) is None
def test_config_services_set(self, coretlv):
node = coretlv.session.add_node()
service = "DefaultRoute"
values = {"meta": "metadata"}
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, node.id),
(ConfigTlvs.OBJECT, "services"),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.OPAQUE, f"service:{service}"),
(ConfigTlvs.VALUES, dict_to_str(values)),
],
)
assert coretlv.session.services.get_service(node.id, service) is None
coretlv.handle_message(message)
assert coretlv.session.services.get_service(node.id, service) is not None
def test_config_mobility_reset(self, coretlv):
wlan = coretlv.session.add_node(_type=NodeTypes.WIRELESS_LAN)
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "MobilityManager"),
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
],
)
coretlv.session.mobility.set_model_config(wlan.id, BasicRangeModel.name, {})
assert len(coretlv.session.mobility.node_configurations) == 1
coretlv.handle_message(message)
assert len(coretlv.session.mobility.node_configurations) == 0
def test_config_mobility_model_request(self, coretlv):
wlan = coretlv.session.add_node(_type=NodeTypes.WIRELESS_LAN)
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, wlan.id),
(ConfigTlvs.OBJECT, BasicRangeModel.name),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_mobility_model_update(self, coretlv):
wlan = coretlv.session.add_node(_type=NodeTypes.WIRELESS_LAN)
config_key = "range"
config_value = "1000"
values = {config_key: config_value}
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, wlan.id),
(ConfigTlvs.OBJECT, BasicRangeModel.name),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, dict_to_str(values)),
],
)
coretlv.handle_message(message)
config = coretlv.session.mobility.get_model_config(
wlan.id, BasicRangeModel.name
)
assert config[config_key] == config_value
def test_config_emane_model_request(self, coretlv):
wlan = coretlv.session.add_node(_type=NodeTypes.WIRELESS_LAN)
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, wlan.id),
(ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_emane_model_update(self, coretlv):
wlan = coretlv.session.add_node(_type=NodeTypes.WIRELESS_LAN)
config_key = "distance"
config_value = "50051"
values = {config_key: config_value}
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.NODE, wlan.id),
(ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, dict_to_str(values)),
],
)
coretlv.handle_message(message)
config = coretlv.session.emane.get_model_config(
wlan.id, EmaneIeee80211abgModel.name
)
assert config[config_key] == config_value
def test_config_emane_request(self, coretlv):
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "emane"),
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
],
)
coretlv.handle_broadcast_config = mock.MagicMock()
coretlv.handle_message(message)
coretlv.handle_broadcast_config.assert_called_once()
def test_config_emane_update(self, coretlv):
config_key = "eventservicedevice"
config_value = "eth4"
values = {config_key: config_value}
message = coreapi.CoreConfMessage.create(
0,
[
(ConfigTlvs.OBJECT, "emane"),
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
(ConfigTlvs.VALUES, dict_to_str(values)),
],
)
coretlv.handle_message(message)
config = coretlv.session.emane.get_configs()
assert config[config_key] == config_value
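# Note (added for illustration): these tests rely on a `coretlv` fixture and a
# "mock" option read via request.config.getoption("mock"), both presumably
# provided by a conftest.py that is not shown here. Assuming that setup, a
# typical run would be:
#
#     pytest daemon/tests/test_gui.py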
| 33.132716
| 88
| 0.589412
|
36f871b4f33d2a47025528d6bbaf5ebdf20e71b0
| 49,849
|
py
|
Python
|
sfepy/terms/terms.py
|
antonykamp/sfepy
|
8213d3c8cc2825602b41dc65eb543b575856ca8c
|
[
"BSD-3-Clause"
] | 1
|
2021-05-15T16:28:45.000Z
|
2021-05-15T16:28:45.000Z
|
sfepy/terms/terms.py
|
Moyunning/sfepy
|
127ab753a2f4f24ed359d0152088d11227c3dd49
|
[
"BSD-3-Clause"
] | null | null | null |
sfepy/terms/terms.py
|
Moyunning/sfepy
|
127ab753a2f4f24ed359d0152088d11227c3dd49
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import re
from copy import copy
import numpy as nm
from sfepy.base.base import (as_float_or_complex, get_default, assert_,
Container, Struct, basestr, goptions)
from sfepy.base.compat import in1d
# Used for imports in term files.
from sfepy.terms.extmods import terms
import six
from six.moves import range
from functools import reduce
_match_args = re.compile('^([^\(\}]*)\((.*)\)$').match
_match_virtual = re.compile('^virtual$').match
_match_state = re.compile('^state(_[_a-zA-Z0-9]+)?$').match
_match_parameter = re.compile('^parameter(_[_a-zA-Z0-9]+)?$').match
_match_material = re.compile('^material(_[_a-zA-Z0-9]+)?$').match
_match_material_opt = re.compile('^opt_material(_[_a-zA-Z0-9]+)?$').match
_match_material_root = re.compile('(.+)\.(.*)').match
_match_ts = re.compile('^ts$').match
def get_arg_kinds(arg_types):
"""
Translate `arg_types` of a Term to a canonical form.
Parameters
----------
arg_types : tuple of strings
The term argument types, as given in the `arg_types` attribute.
Returns
-------
arg_kinds : list of strings
The argument kinds - one of 'virtual_variable', 'state_variable',
'parameter_variable', 'opt_material', 'ts', 'user'.
"""
arg_kinds = []
for ii, arg_type in enumerate(arg_types):
if _match_virtual(arg_type):
arg_kinds.append('virtual_variable')
elif _match_state(arg_type):
arg_kinds.append('state_variable')
elif _match_parameter(arg_type):
arg_kinds.append('parameter_variable')
elif _match_material(arg_type):
arg_kinds.append('material')
elif _match_material_opt(arg_type):
arg_kinds.append('opt_material')
if ii > 0:
msg = 'opt_material at position %d, must be at 0!' % ii
raise ValueError(msg)
elif _match_ts(arg_type):
arg_kinds.append('ts')
else:
arg_kinds.append('user')
return arg_kinds
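# Illustrative example (not part of the original module): only the first
# position may hold an optional material; other positions are classified by
# their name prefix.
#
#     get_arg_kinds(('opt_material', 'virtual', 'state'))
#     -> ['opt_material', 'virtual_variable', 'state_variable']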
def get_shape_kind(integration):
"""
Get data shape kind for given integration type.
"""
if integration == 'surface':
shape_kind = 'surface'
elif integration in ('volume', 'plate', 'surface_extra'):
shape_kind = 'volume'
elif integration == 'point':
shape_kind = 'point'
else:
raise NotImplementedError('unsupported term integration! (%s)'
% integration)
return shape_kind
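# For instance, get_shape_kind('surface_extra') returns 'volume', while
# get_shape_kind('point') returns 'point' (illustrative note added here).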
def split_complex_args(args):
"""
Split complex arguments to real and imaginary parts.
Returns
-------
newargs : dictionary
Dictionary with lists corresponding to `args` such that each
argument of numpy.complex128 data type is split to its real and
imaginary part. The output depends on the number of complex
arguments in 'args':
- 0: list (key 'r') identical to input one
- 1: two lists with keys 'r', 'i' corresponding to real
and imaginary parts
- 2: output dictionary contains four lists:
- 'r' - real(arg1), real(arg2)
- 'i' - imag(arg1), imag(arg2)
- 'ri' - real(arg1), imag(arg2)
- 'ir' - imag(arg1), real(arg2)
"""
newargs = {}
cai = []
for ii, arg in enumerate(args):
if isinstance(arg, nm.ndarray) and (arg.dtype == nm.complex128):
cai.append(ii)
if len(cai) > 0:
newargs['r'] = list(args[:])
newargs['i'] = list(args[:])
arg1 = cai[0]
newargs['r'][arg1] = args[arg1].real.copy()
newargs['i'][arg1] = args[arg1].imag.copy()
if len(cai) == 2:
arg2 = cai[1]
newargs['r'][arg2] = args[arg2].real.copy()
newargs['i'][arg2] = args[arg2].imag.copy()
newargs['ri'] = list(args[:])
newargs['ir'] = list(args[:])
newargs['ri'][arg1] = newargs['r'][arg1]
newargs['ri'][arg2] = newargs['i'][arg2]
newargs['ir'][arg1] = newargs['i'][arg1]
newargs['ir'][arg2] = newargs['r'][arg2]
elif len(cai) > 2:
raise NotImplementedError('more than 2 complex arguments! (%d)'
% len(cai))
else:
newargs['r'] = args[:]
return newargs
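# Illustrative example (added for clarity, not in the original module): with a
# single complex argument, its real and imaginary parts replace it in the 'r'
# and 'i' lists, while purely real arguments are passed through unchanged.
#
#     out = split_complex_args([nm.array([1.0 + 2.0j]), nm.array([3.0])])
#     out['r'][0] -> array([1.]);  out['i'][0] -> array([2.])
#     out['r'][1] is the untouched second argument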
def create_arg_parser():
from pyparsing import Literal, Word, delimitedList, Group, \
StringStart, StringEnd, Optional, nums, alphas, alphanums
ident = Word(alphas, alphanums + "_")
inumber = Word("+-" + nums, nums)
history = Optional(Literal('[').suppress() + inumber
+ Literal(']').suppress(), default=0)("history")
history.setParseAction(lambda str, loc, toks: int(toks[0]))
variable = Group(Word(alphas, alphanums + '._') + history)
derivative = Group(Literal('d') + variable\
+ Literal('/').suppress() + Literal('dt'))
trace = Group(Literal('tr')
+ Literal('(').suppress()
+ Optional(ident + Literal(',').suppress(), default=None)
+ variable
+ Literal(')').suppress())
generalized_var = derivative | trace | variable
args = StringStart() + delimitedList(generalized_var) + StringEnd()
return args
class ConnInfo(Struct):
def get_region(self, can_trace=True):
if self.is_trace and can_trace:
return self.region.get_mirror_region(self.trace_region)
else:
return self.region
def get_region_name(self, can_trace=True):
if self.is_trace and can_trace:
reg = self.region.get_mirror_region(self.trace_region)
else:
reg = self.region
if reg is not None:
return reg.name
else:
return None
class Terms(Container):
@staticmethod
def from_desc(term_descs, regions, integrals=None):
"""
Create terms, assign each term its region.
"""
from sfepy.terms import term_table
terms = Terms()
for td in term_descs:
try:
constructor = term_table[td.name]
except:
msg = "term '%s' is not in %s" % (td.name,
sorted(term_table.keys()))
raise ValueError(msg)
try:
region = regions[td.region]
except IndexError:
raise KeyError('region "%s" does not exist!' % td.region)
term = Term.from_desc(constructor, td, region, integrals=integrals)
terms.append(term)
return terms
def __init__(self, objs=None):
Container.__init__(self, objs=objs)
self.update_expression()
def insert(self, ii, obj):
Container.insert(self, ii, obj)
self.update_expression()
def append(self, obj):
Container.append(self, obj)
self.update_expression()
def update_expression(self):
self.expression = []
for term in self:
aux = [term.sign, term.name, term.arg_str,
term.integral_name, term.region.name]
self.expression.append(aux)
def __mul__(self, other):
out = Terms()
for name, term in self.iteritems():
out.append(term * other)
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, Term):
out = self.copy()
out.append(other)
elif isinstance(other, Terms):
out = Terms(self._objs + other._objs)
else:
raise ValueError('cannot add Terms with %s!' % other)
return out
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, Term):
out = self + (-other)
elif isinstance(other, Terms):
out = self + (-other)
else:
raise ValueError('cannot subtract Terms with %s!' % other)
return out
def __rsub__(self, other):
return -self + other
def __pos__(self):
return self
def __neg__(self):
return -1.0 * self
def setup(self):
for term in self:
term.setup()
def assign_args(self, variables, materials, user=None):
"""
Assign all term arguments.
"""
for term in self:
term.assign_args(variables, materials, user)
def get_variable_names(self):
out = []
for term in self:
out.extend(term.get_variable_names())
return list(set(out))
def get_material_names(self):
out = []
for term in self:
out.extend(term.get_material_names())
return list(set(out))
def get_user_names(self):
out = []
for term in self:
out.extend(term.get_user_names())
return list(set(out))
class Term(Struct):
name = ''
arg_types = ()
arg_shapes = {}
integration = 'volume'
geometries = ['1_2', '2_3', '2_4', '3_4', '3_8']
@staticmethod
def new(name, integral, region, **kwargs):
from sfepy.terms import term_table
arg_str = _match_args(name)
if arg_str is not None:
name, arg_str = arg_str.groups()
else:
raise ValueError('bad term syntax! (%s)' % name)
if name in term_table:
constructor = term_table[name]
else:
msg = "term '%s' is not in %s" % (name, sorted(term_table.keys()))
raise ValueError(msg)
obj = constructor(name, arg_str, integral, region, **kwargs)
return obj
@staticmethod
def from_desc(constructor, desc, region, integrals=None):
from sfepy.discrete import Integrals
if integrals is None:
integrals = Integrals()
integral = integrals.get(desc.integral)
obj = constructor(desc.name, desc.args, integral, region)
obj.sign = desc.sign
return obj
def __init__(self, name, arg_str, integral, region, **kwargs):
self.name = name
self.arg_str = arg_str
self.region = region
self._kwargs = kwargs
self._integration = self.integration
self.sign = 1.0
self.set_integral(integral)
def __mul__(self, other):
try:
mul = as_float_or_complex(other)
except ValueError:
raise ValueError('cannot multiply Term with %s!' % other)
out = self.copy(name=self.name)
out.sign = mul * self.sign
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, Term):
out = Terms([self, other])
else:
out = NotImplemented
return out
def __sub__(self, other):
if isinstance(other, Term):
out = Terms([self, -1.0 * other])
else:
out = NotImplemented
return out
def __pos__(self):
return self
def __neg__(self):
out = -1.0 * self
return out
def get_str(self):
return '{:+} * {}.{}.{}({})'.format(
self.sign, self.name, self.integral.order,
self.region.name, self.arg_str)
def set_integral(self, integral):
"""
Set the term integral.
"""
self.integral = integral
if self.integral is not None:
self.integral_name = self.integral.name
def setup(self):
self.function = Struct.get(self, 'function', None)
self.step = 0
self.dt = 1.0
self.is_quasistatic = False
self.has_region = True
self.setup_formal_args()
if self._kwargs:
self.setup_args(**self._kwargs)
else:
self.args = []
def setup_formal_args(self):
self.arg_names = []
self.arg_steps = {}
self.arg_derivatives = {}
self.arg_traces = {}
self.arg_trace_regions = {}
parser = create_arg_parser()
self.arg_desc = parser.parseString(self.arg_str)
for arg in self.arg_desc:
derivative = None
trace = False
trace_region = None
if isinstance(arg[1], int):
name, step = arg
else:
kind = arg[0]
if kind == 'd':
name, step = arg[1]
derivative = arg[2]
elif kind == 'tr':
trace = True
trace_region = arg[1]
name, step = arg[2]
match = _match_material_root(name)
if match:
name = (match.group(1), match.group(2))
self.arg_names.append(name)
self.arg_steps[name] = step
self.arg_derivatives[name] = derivative
self.arg_traces[name] = trace
self.arg_trace_regions[name] = trace_region
def setup_args(self, **kwargs):
self._kwargs = kwargs
self.args = []
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
self.args.append(self._kwargs[arg_name])
else:
self.args.append((self._kwargs[arg_name[0]], arg_name[1]))
self.classify_args()
self.check_args()
def assign_args(self, variables, materials, user=None):
"""
Check term argument existence in variables, materials, user data
and assign the arguments to terms. Also check compatibility of
field and term regions.
"""
if user is None:
user = {}
user.setdefault('ts', Struct())
kwargs = {}
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
if arg_name in variables.names:
kwargs[arg_name] = variables[arg_name]
elif arg_name in user:
kwargs[arg_name] = user[arg_name]
else:
raise ValueError('argument %s not found!' % arg_name)
else:
arg_name = arg_name[0]
if arg_name in materials.names:
kwargs[arg_name] = materials[arg_name]
else:
raise ValueError('material argument %s not found!'
% arg_name)
self.setup_args(**kwargs)
def classify_args(self):
"""
Classify types of the term arguments and find matching call
signature.
A state variable can be in place of a parameter variable and
vice versa.
"""
self.names = Struct(name='arg_names',
material=[], variable=[], user=[],
state=[], virtual=[], parameter=[])
# Prepare for 'opt_material' - just prepend a None argument if needed.
if isinstance(self.arg_types[0], tuple):
arg_types = self.arg_types[0]
else:
arg_types = self.arg_types
if len(arg_types) == (len(self.args) + 1):
self.args.insert(0, (None, None))
self.arg_names.insert(0, (None, None))
if isinstance(self.arg_types[0], tuple):
assert_(len(self.modes) == len(self.arg_types))
# Find matching call signature using variable arguments - material
# and user arguments are ignored!
matched = []
for it, arg_types in enumerate(self.arg_types):
arg_kinds = get_arg_kinds(arg_types)
if self._check_variables(arg_kinds):
matched.append((it, arg_kinds))
if len(matched) == 1:
i_match, arg_kinds = matched[0]
arg_types = self.arg_types[i_match]
self.mode = self.modes[i_match]
elif len(matched) == 0:
msg = 'cannot match arguments! (%s)' % self.arg_names
raise ValueError(msg)
else:
msg = 'ambiguous arguments! (%s)' % self.arg_names
raise ValueError(msg)
else:
arg_types = self.arg_types
arg_kinds = get_arg_kinds(self.arg_types)
self.mode = Struct.get(self, 'mode', None)
if not self._check_variables(arg_kinds):
raise ValueError('cannot match variables! (%s)'
% self.arg_names)
# Set actual argument types.
self.ats = list(arg_types)
for ii, arg_kind in enumerate(arg_kinds):
name = self.arg_names[ii]
if arg_kind.endswith('variable'):
names = self.names.variable
if arg_kind == 'virtual_variable':
self.names.virtual.append(name)
elif arg_kind == 'state_variable':
self.names.state.append(name)
elif arg_kind == 'parameter_variable':
self.names.parameter.append(name)
elif arg_kind.endswith('material'):
# This should be better checked already in create_arg_parser().
if not isinstance(name, tuple):
raise ValueError('wrong material argument %s of term %s!'
% (name, self.get_str()))
names = self.names.material
else:
names = self.names.user
names.append(name)
self.n_virtual = len(self.names.virtual)
if self.n_virtual > 1:
raise ValueError('at most one virtual variable is allowed! (%d)'
% self.n_virtual)
self.set_arg_types()
self.setup_integration()
def _check_variables(self, arg_kinds):
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind.endswith('variable'):
var = self.args[ii]
check = {'virtual_variable' : var.is_virtual,
'state_variable' : var.is_state_or_parameter,
'parameter_variable' : var.is_state_or_parameter}
if not check[arg_kind]():
return False
else:
return True
def set_arg_types(self):
pass
def check_args(self):
"""
        Checks common to all terms.
Check compatibility of field and term regions.
"""
vns = self.get_variable_names()
for name in vns:
field = self._kwargs[name].get_field()
if field is None:
continue
region = self.region
if self.arg_traces[name]:
mreg_name = self.arg_trace_regions[name]
if mreg_name is None:
mreg_name = region.setup_mirror_region(mreg_name,
ret_name=True)
self.arg_trace_regions[name] = mreg_name
else:
region.setup_mirror_region(mreg_name)
region = region.get_mirror_region(mreg_name)
if not nm.all(in1d(region.vertices,
field.region.vertices)):
msg = ('%s: incompatible regions: (self, field %s)'
+ '(%s in %s)') %\
(self.name, field.name,
self.region.vertices, field.region.vertices)
raise ValueError(msg)
def get_variable_names(self):
return self.names.variable
def get_material_names(self):
out = []
for aux in self.names.material:
if aux[0] is not None:
out.append(aux[0])
return out
def get_user_names(self):
return self.names.user
def get_virtual_name(self):
if not self.names.virtual:
return None
var = self.get_virtual_variable()
return var.name
def get_state_names(self):
"""
If variables are given, return only true unknowns whose data are of
the current time step (0).
"""
variables = self.get_state_variables()
return [var.name for var in variables]
def get_parameter_names(self):
return copy(self.names.parameter)
def get_conn_key(self):
"""The key to be used in DOF connectivity information."""
key = (self.name,) + tuple(self.arg_names)
arg_traces = [k for k, v in self.arg_traces.items() if v]
if len(arg_traces) > 0:
atr = arg_traces[-1]
trace = True, self.arg_trace_regions[atr], atr
else:
trace = False, None, None
key += (self.integral_name, self.region.name) + trace
return key
def get_conn_info(self):
vvar = self.get_virtual_variable()
svars = self.get_state_variables()
pvars = self.get_parameter_variables()
all_vars = self.get_variables()
dc_type = self.get_dof_conn_type()
tgs = self.get_geometry_types()
v_tg = None
if vvar is not None:
field = vvar.get_field()
if field is not None:
if vvar.name in tgs:
v_tg = tgs[vvar.name]
else:
v_tg = None
else:
# No virtual variable -> all unknowns are in fact known parameters.
pvars += svars
svars = []
region = self.get_region()
if region is not None:
arg_traces = [k for k, v in self.arg_traces.items() if v]
if len(arg_traces) > 0:
aname = arg_traces[-1]
mreg_name = self.arg_trace_regions[aname]
if mreg_name is None:
mreg_name = region.setup_mirror_region(mreg_name,
ret_name=True)
self.arg_trace_regions[aname] = mreg_name
else:
region.setup_mirror_region(mreg_name)
vals = []
aux_pvars = []
for svar in svars:
# Allow only true state variables.
if not svar.is_state():
aux_pvars.append(svar)
continue
field = svar.get_field()
is_trace = self.arg_traces[svar.name]
trace_region = self.arg_trace_regions[svar.name]
if svar.name in tgs:
ps_tg = tgs[svar.name]
else:
ps_tg = v_tg
val = ConnInfo(virtual=vvar,
state=svar,
primary=svar,
has_virtual=True,
has_state=True,
is_trace=is_trace,
trace_region=trace_region,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=ps_tg,
region=region,
all_vars=all_vars)
vals.append(val)
pvars += aux_pvars
for pvar in pvars:
field = pvar.get_field()
is_trace = self.arg_traces[pvar.name]
trace_region = self.arg_trace_regions[pvar.name]
if pvar.name in tgs:
ps_tg = tgs[pvar.name]
else:
ps_tg = v_tg
val = ConnInfo(virtual=vvar,
state=None,
primary=pvar.get_primary(),
has_virtual=vvar is not None,
has_state=False,
is_trace=is_trace,
trace_region=trace_region,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=ps_tg,
region=region,
all_vars=all_vars)
vals.append(val)
if vvar and (len(vals) == 0):
# No state, parameter variables, just the virtual one.
val = ConnInfo(virtual=vvar,
state=vvar.get_primary(),
primary=vvar.get_primary(),
has_virtual=True,
has_state=False,
is_trace=False,
trace_region=None,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=v_tg,
region=region,
all_vars=all_vars)
vals.append(val)
return vals
def get_args_by_name(self, arg_names):
"""
Return arguments by name.
"""
out = []
for name in arg_names:
try:
ii = self.arg_names.index(name)
except ValueError:
raise ValueError('non-existing argument! (%s)' % name)
out.append(self.args[ii])
return out
def get_args(self, arg_types=None, **kwargs):
"""
Return arguments by type as specified in arg_types (or
self.ats). Arguments in **kwargs can override the ones assigned
at the term construction - this is useful for passing user data.
"""
ats = self.ats
if arg_types is None:
arg_types = ats
args = []
region_name, iorder = self.region.name, self.integral.order
for at in arg_types:
ii = ats.index(at)
arg_name = self.arg_names[ii]
if isinstance(arg_name, basestr):
if arg_name in kwargs:
args.append(kwargs[arg_name])
else:
args.append(self.args[ii])
else:
mat, par_name = self.args[ii]
if mat is not None:
mat_data = mat.get_data((region_name, iorder), par_name)
else:
mat_data = None
args.append(mat_data)
return args
def get_kwargs(self, keys, **kwargs):
"""Extract arguments from **kwargs listed in keys (default is
None)."""
return [kwargs.get(name) for name in keys]
def get_arg_name(self, arg_type, full=False, join=None):
"""
        Get the name of the argument specified by `arg_type`.
Parameters
----------
arg_type : str
The argument type string.
full : bool
If True, return the full name. For example, if the name of a
variable argument is 'u' and its time derivative is
requested, the full name is 'du/dt'.
join : str, optional
Optionally, the material argument name tuple can be joined
to a single string using the `join` string.
Returns
-------
name : str
The argument name.
"""
try:
ii = self.ats.index(arg_type)
except ValueError:
return None
name = self.arg_names[ii]
if full:
# Include derivatives.
if self.arg_derivatives[name]:
name = 'd%s/%s' % (name, self.arg_derivatives[name])
if (join is not None) and isinstance(name, tuple):
name = join.join(name)
return name
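    # Illustrative examples (added for clarity; the particular argument names
    # are assumptions): for a state variable 'u' with a time derivative,
    # get_arg_name('state', full=True) returns 'du/dt'; for a material argument
    # stored as the tuple ('m', 'D'), get_arg_name('material', join='.')
    # returns 'm.D'.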
def setup_integration(self):
self.has_geometry = True
self.geometry_types = {}
if isinstance(self.integration, basestr):
for var in self.get_variables():
self.geometry_types[var.name] = self.integration
else:
if self.mode is not None:
self.integration = self._integration[self.mode]
if self.integration is not None:
for arg_type, gtype in six.iteritems(self.integration):
var = self.get_args(arg_types=[arg_type])[0]
self.geometry_types[var.name] = gtype
gtypes = list(set(self.geometry_types.values()))
if 'surface_extra' in gtypes:
self.dof_conn_type = 'volume'
elif len(gtypes):
self.dof_conn_type = gtypes[0]
def get_region(self):
return self.region
def get_geometry_types(self):
"""
Returns
-------
out : dict
The required geometry types for each variable argument.
"""
return self.geometry_types
def get_dof_conn_type(self):
return Struct(name='dof_conn_info', type=self.dof_conn_type,
region_name=self.region.name)
def get_assembling_cells(self, shape=None):
"""
Return the assembling cell indices into a DOF connectivity.
"""
cells = nm.arange(shape[0], dtype=nm.int32)
return cells
def time_update(self, ts):
if ts is not None:
self.step = ts.step
self.dt = ts.dt
self.is_quasistatic = ts.is_quasistatic
if 'ts' in self._kwargs:
self._kwargs['ts'].update(ts)
def advance(self, ts):
"""
Advance to the next time step. Implemented in subclasses.
"""
def get_vector(self, variable):
"""Get the vector stored in `variable` according to self.arg_steps
and self.arg_derivatives. Supports only the backward difference w.r.t.
time."""
name = variable.name
return variable(step=self.arg_steps[name],
derivative=self.arg_derivatives[name])
def get_variables(self, as_list=True):
if as_list:
variables = self.get_args_by_name(self.names.variable)
else:
variables = {}
for var in self.get_args_by_name(self.names.variable):
variables[var.name] = var
return variables
def get_virtual_variable(self):
aux = self.get_args_by_name(self.names.virtual)
if len(aux) == 1:
var = aux[0]
else:
var = None
return var
def get_state_variables(self, unknown_only=False):
variables = self.get_args_by_name(self.names.state)
if unknown_only:
variables = [var for var in variables
if (var.kind == 'unknown') and
(self.arg_steps[var.name] == 0)]
return variables
def get_parameter_variables(self):
return self.get_args_by_name(self.names.parameter)
def get_materials(self, join=False):
materials = self.get_args_by_name(self.names.material)
for mat in materials:
if mat[0] is None:
materials.remove(mat)
if join:
materials = list(set(mat[0] for mat in materials))
return materials
def get_qp_key(self):
"""
Return a key identifying uniquely the term quadrature points.
"""
return (self.region.name, self.integral.order)
def get_physical_qps(self):
"""
Get physical quadrature points corresponding to the term region
and integral.
"""
from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs
if self.integration == 'point':
phys_qps = PhysicalQPs()
else:
phys_qps = get_physical_qps(self.region, self.integral)
return phys_qps
def get_mapping(self, variable, get_saved=False, return_key=False):
"""
Get the reference mapping from a variable.
Notes
-----
This is a convenience wrapper of Field.get_mapping() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
mreg_name = self.arg_trace_regions[variable.name]
region = self.region.get_mirror_region(mreg_name)
else:
region = self.region
out = variable.field.get_mapping(region,
self.integral, integration,
get_saved=get_saved,
return_key=return_key)
return out
def get_data_shape(self, variable):
"""
Get data shape information from variable.
Notes
-----
This is a convenience wrapper of FieldVariable.get_data_shape() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
mreg_name = self.arg_trace_regions[variable.name]
region = self.region.get_mirror_region(mreg_name)
else:
region = self.region
out = variable.get_data_shape(self.integral, integration, region.name)
return out
def get(self, variable, quantity_name, bf=None, integration=None,
step=None, time_derivative=None):
"""
Get the named quantity related to the variable.
Notes
-----
This is a convenience wrapper of Variable.evaluate() that
initializes the arguments using the term data.
"""
name = variable.name
step = get_default(step, self.arg_steps[name])
time_derivative = get_default(time_derivative,
self.arg_derivatives[name])
integration = get_default(integration, self.geometry_types[name])
data = variable.evaluate(mode=quantity_name,
region=self.region, integral=self.integral,
integration=integration,
step=step, time_derivative=time_derivative,
is_trace=self.arg_traces[name], bf=bf,
trace_region=self.arg_trace_regions[name])
return data
def check_shapes(self, *args, **kwargs):
"""
Check term argument shapes at run-time.
"""
from sfepy.base.base import output
from sfepy.mechanics.tensors import dim2sym
dim = self.region.dim
sym = dim2sym(dim)
def _parse_scalar_shape(sh):
if isinstance(sh, basestr):
if sh == 'D':
return dim
elif sh == 'D2':
return dim**2
elif sh == 'S':
return sym
elif sh == 'N': # General number.
return nm.inf
elif sh == 'str':
return 'str'
else:
return int(sh)
else:
return sh
def _parse_tuple_shape(sh):
if isinstance(sh, basestr):
return tuple((_parse_scalar_shape(ii.strip())
for ii in sh.split(',')))
else:
return (int(sh),)
arg_kinds = get_arg_kinds(self.ats)
arg_shapes_list = self.arg_shapes
if not isinstance(arg_shapes_list, list):
arg_shapes_list = [arg_shapes_list]
# Loop allowed shapes until a match is found, else error.
allowed_shapes = []
prev_shapes = {}
actual_shapes = {}
for _arg_shapes in arg_shapes_list:
# Unset shapes are taken from the previous iteration.
arg_shapes = copy(prev_shapes)
arg_shapes.update(_arg_shapes)
prev_shapes = arg_shapes
allowed_shapes.append(arg_shapes)
n_ok = 0
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind in ('user', 'ts'):
n_ok += 1
continue
arg = args[ii]
key = '%s:%s' % (self.ats[ii], self.arg_names[ii])
if self.mode is not None:
extended_ats = self.ats[ii] + ('/%s' % self.mode)
else:
extended_ats = self.ats[ii]
try:
sh = arg_shapes[self.ats[ii]]
except KeyError:
sh = arg_shapes[extended_ats]
if arg_kind.endswith('variable'):
n_el, n_qp, _dim, n_en, n_c = self.get_data_shape(arg)
actual_shapes[key] = (n_c,)
shape = _parse_scalar_shape(sh[0] if isinstance(sh, tuple)
else sh)
if nm.isinf(shape):
n_ok += 1
else:
n_ok += shape == n_c
elif arg_kind.endswith('material'):
if arg is None: # Switched-off opt_material.
n_ok += sh is None
continue
if sh is None:
continue
prefix = ''
if isinstance(sh, basestr):
aux = tuple(ii.strip() for ii in sh.split(':'))
if len(aux) == 2:
prefix, sh = aux
if sh == 'str':
n_ok += isinstance(arg, basestr)
continue
shape = _parse_tuple_shape(sh)
ls = len(shape)
aarg = nm.array(arg, ndmin=1)
actual_shapes[key] = aarg.shape
                    # Substitute the general dimension 'N' with the actual value.
iinfs = nm.where(nm.isinf(shape))[0]
if len(iinfs):
shape = list(shape)
for iinf in iinfs:
shape[iinf] = aarg.shape[-ls+iinf]
shape = tuple(shape)
if (ls > 1) or (shape[0] > 1):
# Array.
n_ok += shape == aarg.shape[-ls:]
actual_shapes[key] = aarg.shape[-ls:]
elif (ls == 1) and (shape[0] == 1):
# Scalar constant or callable as term argument
from numbers import Number
n_ok += isinstance(arg, Number) or callable(arg)
else:
n_ok += 1
if n_ok == len(arg_kinds):
break
else:
term_str = self.get_str()
output('allowed argument shapes for term "%s":' % term_str)
output(allowed_shapes)
output('actual argument shapes:')
output(actual_shapes)
raise ValueError('wrong arguments shapes for "%s" term! (see above)'
% term_str)
def standalone_setup(self):
from sfepy.discrete import create_adof_conns, Variables
conn_info = {'aux' : self.get_conn_info()}
adcs = create_adof_conns(conn_info, None)
variables = Variables(self.get_variables())
variables.set_adof_conns(adcs)
materials = self.get_materials(join=True)
for mat in materials:
mat.time_update(None, [Struct(terms=[self])])
def call_get_fargs(self, args, kwargs):
try:
fargs = self.get_fargs(*args, **kwargs)
except (RuntimeError, ValueError):
terms.errclear()
raise
return fargs
def call_function(self, out, fargs):
try:
status = self.function(out, *fargs)
except (RuntimeError, ValueError):
terms.errclear()
raise
if status:
terms.errclear()
raise ValueError('term evaluation failed! (%s)' % self.name)
return status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out = nm.empty(shape, dtype=nm.float64)
if mode == 'eval':
status = self.call_function(out, fargs)
# Sum over elements but not over components.
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
status = self.call_function(out, fargs)
return out, status
def eval_complex(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
rout = nm.empty(shape, dtype=nm.float64)
fargsd = split_complex_args(fargs)
# Assuming linear forms. Then the matrix is the
# same both for real and imaginary part.
rstatus = self.call_function(rout, fargsd['r'])
if (diff_var is None) and len(fargsd) >= 2:
iout = nm.empty(shape, dtype=nm.float64)
istatus = self.call_function(iout, fargsd['i'])
if mode == 'eval' and len(fargsd) >= 4:
irout = nm.empty(shape, dtype=nm.float64)
irstatus = self.call_function(irout, fargsd['ir'])
riout = nm.empty(shape, dtype=nm.float64)
ristatus = self.call_function(riout, fargsd['ri'])
out = (rout - iout) + (riout + irout) * 1j
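                # (added note) This is the expansion of a bilinear form with complex
                # arguments: (u_r + i u_i) A (v_r + i v_i) has real part
                # u_r A v_r - u_i A v_i and imaginary part u_r A v_i + u_i A v_r,
                # which is consistent with rout, iout, riout, irout being the
                # real-real, imag-imag, real-imag and imag-real evaluations above.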
status = rstatus or istatus or ristatus or irstatus
else:
out = rout + 1j * iout
status = rstatus or istatus
else:
out, status = rout + 0j, rstatus
if mode == 'eval':
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
return out, status
def evaluate(self, mode='eval', diff_var=None,
standalone=True, ret_status=False, **kwargs):
"""
Evaluate the term.
Parameters
----------
mode : 'eval' (default), or 'weak'
The term evaluation mode.
Returns
-------
val : float or array
In 'eval' mode, the term returns a single value (the
integral, it does not need to be a scalar), while in 'weak'
mode it returns an array for each element.
status : int, optional
The flag indicating evaluation success (0) or failure
(nonzero). Only provided if `ret_status` is True.
iels : array of ints, optional
The local elements indices in 'weak' mode. Only provided in
non-'eval' modes.
"""
if standalone:
self.standalone_setup()
kwargs = kwargs.copy()
term_mode = kwargs.pop('term_mode', None)
if mode in ('eval', 'el_eval', 'el_avg', 'qp'):
args = self.get_args(**kwargs)
self.check_shapes(*args)
emode = 'eval' if mode == 'el_eval' else mode
_args = tuple(args) + (emode, term_mode, diff_var)
shape, dtype = self.get_eval_shape(*_args, **kwargs)
if shape[0] == 0:
val = nm.zeros(shape, dtype=dtype)
status = 0
else:
fargs = self.call_get_fargs(_args, kwargs)
if dtype == nm.float64:
val, status = self.eval_real(shape, fargs, mode,
term_mode,
**kwargs)
elif dtype == nm.complex128:
val, status = self.eval_complex(shape, fargs, mode,
term_mode,
**kwargs)
else:
raise ValueError('unsupported term dtype! (%s)' % dtype)
val *= self.sign
out = (val,)
elif mode == 'weak':
varr = self.get_virtual_variable()
if varr is None:
raise ValueError('no virtual variable in weak mode! (in "%s")'
% self.get_str())
if diff_var is not None:
varc = self.get_variables(as_list=False)[diff_var]
args = self.get_args(**kwargs)
self.check_shapes(*args)
n_elr, n_qpr, dim, n_enr, n_cr = self.get_data_shape(varr)
n_row = n_cr * n_enr
if diff_var is None:
shape = (n_elr, 1, n_row, 1)
else:
n_elc, n_qpc, dim, n_enc, n_cc = self.get_data_shape(varc)
n_col = n_cc * n_enc
shape = (n_elr, 1, n_row, n_col)
if shape[0] == 0:
vals = nm.zeros(shape, dtype=varr.dtype)
status = 0
else:
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
if varr.dtype == nm.float64:
vals, status = self.eval_real(shape, fargs, mode,
term_mode,
diff_var, **kwargs)
elif varr.dtype == nm.complex128:
vals, status = self.eval_complex(shape, fargs, mode,
term_mode,
diff_var, **kwargs)
else:
raise ValueError('unsupported term dtype! (%s)'
% varr.dtype)
if not isinstance(vals, tuple):
vals *= self.sign
iels = self.get_assembling_cells(vals.shape)
else:
vals = (self.sign * vals[0],) + vals[1:]
iels = None
out = (vals, iels)
if goptions['check_term_finiteness']:
assert_(nm.isfinite(out[0]).all(),
msg='"%s" term values not finite!' % self.get_str())
if ret_status:
out = out + (status,)
if len(out) == 1:
out = out[0]
return out
def assemble_to(self, asm_obj, val, iels, mode='vector', diff_var=None):
"""
Assemble the results of term evaluation.
For standard terms, assemble the values in `val` corresponding to
elements/cells `iels` into a vector or a CSR sparse matrix `asm_obj`,
depending on `mode`.
For terms with a dynamic connectivity (e.g. contact terms), in
`'matrix'` mode, return the extra COO sparse matrix instead. The extra
matrix has to be added to the global matrix by the caller. By default,
this is done in :func:`Equations.evaluate()
<sfepy.discrete.equations.Equations.evaluate()>`.
"""
import sfepy.discrete.common.extmods.assemble as asm
vvar = self.get_virtual_variable()
dc_type = self.get_dof_conn_type()
extra = None
if mode == 'vector':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_vector
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_vector_complex
for ii in range(len(val)):
if not(val[ii].dtype == nm.complex128):
val[ii] = nm.complex128(val[ii])
if not isinstance(val, tuple):
dc = vvar.get_dof_conn(dc_type)
assert_(val.shape[2] == dc.shape[1])
assemble(asm_obj, val, iels, 1.0, dc)
else:
vals, rows, var = val
if var.eq_map is not None:
eq = var.eq_map.eq
rows = eq[rows]
active = (rows >= 0)
vals, rows = vals[active], rows[active]
# Assumes no repeated indices in rows!
asm_obj[rows] += vals
elif mode == 'matrix':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_matrix
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_matrix_complex
svar = diff_var
tmd = (asm_obj.data, asm_obj.indptr, asm_obj.indices)
if ((asm_obj.dtype == nm.complex128)
and (val.dtype == nm.float64)):
val = val.astype(nm.complex128)
sign = 1.0
if self.arg_derivatives[svar.name]:
if not self.is_quasistatic or (self.step > 0):
sign *= 1.0 / self.dt
else:
sign = 0.0
if not isinstance(val, tuple):
rdc = vvar.get_dof_conn(dc_type)
is_trace = self.arg_traces[svar.name]
trace_region = self.arg_trace_regions[svar.name]
cdc = svar.get_dof_conn(dc_type, is_trace, trace_region)
assert_(val.shape[2:] == (rdc.shape[1], cdc.shape[1]))
assemble(tmd[0], tmd[1], tmd[2], val, iels, sign, rdc, cdc)
else:
from scipy.sparse import coo_matrix
vals, rows, cols, rvar, cvar = val
if rvar.eq_map is not None:
req, ceq = rvar.eq_map.eq, cvar.eq_map.eq
rows, cols = req[rows], ceq[cols]
active = (rows >= 0) & (cols >= 0)
vals, rows, cols = vals[active], rows[active], cols[active]
extra = coo_matrix((sign * vals, (rows, cols)),
shape=asm_obj.shape)
else:
raise ValueError('unknown assembling mode! (%s)' % mode)
return extra
| 31.253292
| 80
| 0.517523
|
fef8a49653a2cf5b206d375068d17900eb8776dd
| 606
|
py
|
Python
|
filmfestival/migrations/0013_auto_20150610_1459.py
|
mykonosbiennale/mykonosbiennale.github.io
|
fba479807204768ac440c77c4850b64fb25d113d
|
[
"Apache-2.0"
] | 1
|
2017-08-19T01:07:22.000Z
|
2017-08-19T01:07:22.000Z
|
filmfestival/migrations/0013_auto_20150610_1459.py
|
thanos/mykonosbiennale.org
|
ddb53fbe4198ce29c252c72f9dca49ed277c50dc
|
[
"Apache-2.0"
] | 7
|
2015-04-02T04:48:43.000Z
|
2022-03-11T23:14:58.000Z
|
filmfestival/migrations/0013_auto_20150610_1459.py
|
mykonosbiennale/mykonosbiennale.github.io
|
fba479807204768ac440c77c4850b64fb25d113d
|
[
"Apache-2.0"
] | 1
|
2015-04-15T20:56:03.000Z
|
2015-04-15T20:56:03.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('filmfestival', '0012_auto_20150610_1454'),
]
operations = [
migrations.AlterModelOptions(
name='screening',
options={'ordering': ('order',)},
),
migrations.AddField(
model_name='screening',
name='order',
field=models.PositiveIntegerField(default=1, editable=False, db_index=True),
preserve_default=False,
),
]
| 24.24
| 88
| 0.59571
|
87b274db799595f219c0dfcf7a9f8b79e096e49d
| 9,479
|
py
|
Python
|
IR_DACF_KW.py
|
LePingKYXK/Dipole-ACF
|
fcbb22c904c6a1019095ef66c5e94f37446357f1
|
[
"BSD-4-Clause-UC"
] | 10
|
2016-09-25T02:45:34.000Z
|
2021-12-26T02:44:20.000Z
|
IR_DACF_KW.py
|
LePingKYXK/Dipole-ACF
|
fcbb22c904c6a1019095ef66c5e94f37446357f1
|
[
"BSD-4-Clause-UC"
] | 2
|
2016-09-23T22:50:55.000Z
|
2017-07-12T00:16:35.000Z
|
IR_DACF_KW.py
|
LePingKYXK/Dipole-ACF
|
fcbb22c904c6a1019095ef66c5e94f37446357f1
|
[
"BSD-4-Clause-UC"
] | 7
|
2016-11-11T04:26:48.000Z
|
2021-05-22T12:18:06.000Z
|
#!/usr/bin/env python
''' This script (VERSION 3.3) is an improved version of Dr. Kulig's first
version, ir_total_QC.py, which was a pure Python script that used neither Numpy
nor Scipy and had no visualization of the results.
The author renamed it to IR_DACF_KW.py, where "K" refers to Dr. Kulig,
and "W" refers to Dr. Wang.
The main improvements are:
(1) Implementation of the powerful Numpy module, which facilitates fast
calculation on data arrays. The Numpy module accelerates the calculations
dramatically by converting all data lists into data arrays.
Usually, the calculations complete within 1 second.
(2) A "zero_padding" function. This function dynamically appends a series of
zeros to the end of the dipole moment array before the FFT, so that the length
of the whole data series is a power of two (2^n).
*[Note] FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when n is a power of 2, and the
transform is therefore most efficient for these sizes.
(3) Use of the built-in fftconvolve function from the scipy.signal module to
accelerate the auto-correlation function calculation.
(4) A window function is applied to suppress noise. The
window function is imported from the scipy.signal module.
(5) A visualization function for plotting the results.
Contribution:
Dr. Huan Wang (The 3rd and 2nd version)
Dr. Waldemar Kulig (The 1st version)
E-mail address for contacting the authors:
huan.wang@mail.huji.ac.il or wanghuan@iccas.ac.cn (China)
Copyright:
The Hebrew University of Jerusalem, Givat Ram, Jerusalem, 91904, Israel.
'''
from __future__ import division
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import time
##### PLEASE READ THE FOLLOWING INSTRUCTIONs BEFORE RUNNING SCRIPT #####
#### ####
#### The Format for Running This Script: ####
#### python IR_total_KW.py INPUT_FILE DELTA_T WINDOW OUTPUT_FILE ####
#### ####
####   The values that need to be input manually when running this script  ####
#### ####
#### (1) INPUT_FILE_NAME: The Total_Dipole_Moment_*.Diople file ####
#### (NOTE: do NOT need to re-split the Dipole file) ####
#### ####
#### (2) DELTA_T: The Time_step set in simulation, in unit of fs ####
#### ####
#### (3) WINDOW: The Name of the Window Function ####
#### ####
#### (4) OUTPUT_FILE_NAME: The Name of the Output File. ####
#### (NOTE: do NOT need to type > sign!) ####
#### ####
############################# Let's Try It! ###########################
####   The values that need to be input manually when running this script  ####
fname = sys.argv[1] # The name of the input file
delta_t = float(sys.argv[2])*1.0e-15 # The time step in unit of femtoseconds
window = sys.argv[3] # The name of the window function
fout = sys.argv[4] # The name of the output file
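# (added) Example invocation, with hypothetical file names:
#     python IR_DACF_KW.py Total_Dipole_Moment_300K.Dipole 0.5 Hann IR_spectrum.dat
# i.e. a 0.5 fs time step and a Hann window.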
#### The constants will be used in this script ####
c = 2.9979245899e10 # speed of light in vacuum in [cm/s], from Wikipedia.
kB = 0.6950347 # Boltzman constant in [cm^-1/K], from Wikipedia.
h_bar = 6.283185 # Reduced Planck constant in atomic unit, where h = 2*pi
#beta = 1.0/(kB * T)
#### Functions will used in this script ####
def read_data(fname):
with open(fname, "r") as fo:
dipole = np.genfromtxt(fo, dtype=np.float64,
delimiter=None, usecols=(1,2,3))
return dipole
def calc_derivative(data, delta_t):
dy = np.zeros(np.shape(data))
for i in xrange(3):
dy[:,i] = np.gradient(data[:,i], edge_order=2)
print "dy = ", dy
dy = dy[~(np.absolute(dy) > 0.1).any(1),:]
return np.divide(dy, delta_t)
def zero_padding(sample_data):
'''
    A series of zeros will be padded to the end of the dipole moment array
    (before the FFT is performed), in order to obtain an array whose length
    is the "next power of two".
#### Next power of two is calculated as: 2**np.ceil(log2(x))
#### or Nfft = 2**int(math.log(len(data_array)*2-1, 2))
'''
N = 2**int(math.log(len(sample_data)*2-1, 2))
return N
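# (added) Quick sanity check of the padding length (illustrative numbers only):
#     >>> zero_padding(np.zeros((1000, 3)))   # 1000*2 - 1 = 1999, int(log2(1999)) = 10
#     1024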
def calc_ACF(array):
'''
This function deals with the auto-correlation function (ACF) of the total
dipole moment derivatives.
    By the Wiener-Khintchine theorem, the autocorrelation function can be
    obtained efficiently via the FFT:
http://en.wikipedia.org/wiki/Wiener%E2%80%93Khinchin_theorem
####
#### http://stackoverflow.com/questions/4503325/autocorrelation-of-a-multidimensional-array-in-numpy
####
#### for fast convolution
#### http://sebug.net/paper/books/scipydoc/frequency_process.html#id5
'''
# normalization
yunbiased = array - np.mean(array, axis=0)
ynorm = np.sum(np.power(yunbiased,2), axis=0)
# print "the average value of input data array", ynorm
autocor = np.zeros(np.shape(array))
for i in xrange(3):
autocor[:,i] = signal.fftconvolve(array[:,i],
array[:,i][::-1],
mode='full')[len(array)-1:]/ynorm[i]
print "shape of the result3 from signal.FFTcorrelate()", np.shape(autocor)
return autocor
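# (added) Sketch of what the fftconvolve line computes (normalization aside), for a
# single 1-D signal x:
#     x = np.random.rand(100) - 0.5
#     acf_fft = signal.fftconvolve(x, x[::-1], mode='full')[len(x)-1:]
#     acf_dir = np.correlate(x, x, mode='full')[len(x)-1:]
#     np.allclose(acf_fft, acf_dir)   # -> True: convolution with the reversed
#                                     #    signal equals the autocorrelation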
def choose_window(data, kind='string'):
if kind == 'Gaussian':
sigma = 2 * math.sqrt(2 * math.log(2))
window = signal.gaussian(len(data), std=4000.0/sigma, sym=False)
elif kind == 'BH':
window = signal.blackmanharris(len(data), sym=False)
elif kind == 'Hamming':
window = signal.hamming(len(data), sym=False)
elif kind == 'Hann':
window = signal.hann(len(data), sym=False)
    else:
        raise ValueError("Unknown window kind: %s" % kind)
    return window
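# (added note) In the 'Gaussian' branch above, sigma = 2*sqrt(2*ln 2) ~= 2.3548 is the
# FWHM-to-standard-deviation factor, so std = 4000/2.3548 ~= 1699 points, i.e. 4000 is
# effectively the full width at half maximum of the window in data points.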
def calc_FFT(data, window):
'''
This function is for calculating the "intensity" of the ACF at each
frequency by using the discrete fast Fourier transform.
####
#### http://stackoverflow.com/questions/20165193/fft-normalization
####
'''
window = choose_window(data, kind=window)
WE = sum(window) / len(data)
wf = window / WE
# convolve the window function.
sig = data * wf[None,:].T
    # A series of zeros will be padded to the end of the DACF array
    # before the FFT.
N = zero_padding(sig)
yfft = np.fft.fft(sig, N, axis=0) / len(sig)
# without window function
# yfft = np.fft.fft(data, n=int(N_fft), axis=0) / len(data)
return np.square(np.absolute(yfft))
######## Save The Results to A TEXT File ########
def save_results(fout, wavenumber, intensity):
title = ("Wavenumber", "IR Intensity", "cm^-1", "a.u.")
with open(fout, "w") as fw:
np.savetxt(fout, np.c_[wavenumber[0:5000], intensity[0:5000]],
fmt="%10.5f %15.5e",
header="{0:>10}{1:>16}\n{2:^11}{3:^20}".format(*title),
comments='')
######## Plot The Spectrum by Using Matplotlib module ########
def visualization(D_p, DACF, wavenumber, intensity):
plt.subplot(3,1,1)
L1 = np.arange(len(D_p))
plt.plot(L1, D_p[:,0], color='red', linewidth=1.5)
plt.plot(L1, D_p[:,1], color='green', linewidth=1.5)
plt.plot(L1, D_p[:,2], color='blue', linewidth=1.5)
plt.axis([0, len(D_p), 1.1*np.min(D_p), 1.1*np.max(D_p)], fontsize=15)
plt.xlabel("Data Points", fontsize=15)
plt.ylabel("Derivative of Dipole (a.u.)", fontsize=15)
plt.subplot(3,1,2)
L2 = np.arange(len(DACF))
plt.plot(L2, DACF[:,0], color='red', linewidth=1.5)
plt.plot(L2, DACF[:,1], color='green', linewidth=1.5)
plt.plot(L2, DACF[:,2], color='blue', linewidth=1.5)
plt.axis([0, len(DACF), 1.1*np.min(DACF), 1.1*np.max(DACF)], fontsize=15)
plt.xlabel("Data Points", fontsize=15)
plt.ylabel("DACF (a.u.)", fontsize=15)
plt.subplot(3,1,3)
plt.plot(wavenumber, intensity, color='black', linewidth=1.5)
plt.axis([0, 4000,
-1.1*np.min(intensity), 1.1*np.max(intensity)],
fontsize=15)
plt.xlabel("Wavenumber (cm$^{-1}$)", fontsize=15)
plt.ylabel("Intensity (a.u.)", fontsize=15)
plt.subplots_adjust(hspace = 0.5)
plt.show()
######## The main program ########
if __name__ == '__main__':
start = time.clock() # The system clock, for checking the running speed.
dipole = read_data(fname)
print "dipole \n", dipole, np.shape(dipole)
D_p = calc_derivative(dipole, delta_t)
DACF = calc_ACF(D_p)
yfft = calc_FFT(DACF, window)
print "\n The Shape OF YFFT = ", np.shape(yfft)
wavenumber = np.fft.fftfreq(len(yfft), delta_t*c)[0:int(len(yfft)/2)]
intensity = np.sum(yfft, axis=1)[0:int(len(yfft)/2)]
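    # (added note) Since c is given in cm/s, delta_t*c is the sample spacing in cm and
    # np.fft.fftfreq therefore returns frequencies directly in wavenumbers (cm^-1);
    # e.g. for delta_t = 0.5 fs the spacing is ~1.5e-5 cm and the Nyquist limit ~33356 cm^-1.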
#### Normalized the intensity
# intensity = intensity/max(intensity)
save_results(fout, wavenumber, intensity)
print "\n Work Completed! Used Time = ", time.clock() - start
visualization(D_p, DACF, wavenumber, intensity)
| 35.501873
| 101
| 0.598797
|
f0ba5be658d70d07e8c26c9d8b61196b30949662
| 1,420
|
py
|
Python
|
app/core/tests/test_admin.py
|
GerryBolanos/recipe-app-api
|
8a3dd2f8bceee86950d89a8eabe8d8584cbf67ac
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
GerryBolanos/recipe-app-api
|
8a3dd2f8bceee86950d89a8eabe8d8584cbf67ac
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
GerryBolanos/recipe-app-api
|
8a3dd2f8bceee86950d89a8eabe8d8584cbf67ac
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
"""Create test client, add new user"""
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@test.com',
password='password123',
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@test.com',
password='password123',
name='Test User Full Name',
)
def test_users_listed(self):
"""Test that users are listed on the user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# Create a URL like so: /admin/core/user/:id/
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| 33.023256
| 68
| 0.633099
|
f65196c5cebb513061671c71a29e1421bc263653
| 1,316
|
py
|
Python
|
tests/conftest.py
|
iwanb/sphinx-autodoc-typehints
|
a25fa05084b4cee7e9e733603b79e68986a0ffee
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
iwanb/sphinx-autodoc-typehints
|
a25fa05084b4cee7e9e733603b79e68986a0ffee
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
iwanb/sphinx-autodoc-typehints
|
a25fa05084b4cee7e9e733603b79e68986a0ffee
|
[
"MIT"
] | 1
|
2020-05-17T17:11:26.000Z
|
2020-05-17T17:11:26.000Z
|
import os
import sys
import pathlib
import shutil
import pytest
from sphinx.testing.path import path
from sphobjinv import Inventory
pytest_plugins = 'sphinx.testing.fixtures'
collect_ignore = ['roots']
@pytest.fixture(scope='session')
def inv(pytestconfig):
cache_path = 'python{v.major}.{v.minor}/objects.inv'.format(v=sys.version_info)
inv_dict = pytestconfig.cache.get(cache_path, None)
if inv_dict is not None:
return Inventory(inv_dict)
print("Downloading objects.inv")
url = 'https://docs.python.org/{v.major}.{v.minor}/objects.inv'.format(v=sys.version_info)
inv = Inventory(url=url)
pytestconfig.cache.set(cache_path, inv.json_dict())
return inv
@pytest.fixture(autouse=True)
def remove_sphinx_projects(sphinx_test_tempdir):
# Remove any directory which appears to be a Sphinx project from
# the temporary directory area.
# See https://github.com/sphinx-doc/sphinx/issues/4040
roots_path = pathlib.Path(sphinx_test_tempdir)
for entry in roots_path.iterdir():
try:
if entry.is_dir() and pathlib.Path(entry, '_build').exists():
shutil.rmtree(str(entry))
except PermissionError:
pass
@pytest.fixture
def rootdir():
return path(os.path.dirname(__file__) or '.').abspath() / 'roots'
| 29.244444
| 94
| 0.705167
|
f29f89cbf65a01d6a423e7fd00f7fdf4f4f28e6a
| 2,056
|
py
|
Python
|
tests/keras/utils/generic_utils_test.py
|
nils-werner/keras
|
78f26df8fb2b8aa5c6262aef44a494a8335a9c6e
|
[
"MIT"
] | 30
|
2017-04-11T04:17:22.000Z
|
2020-09-08T08:18:37.000Z
|
tests/keras/utils/generic_utils_test.py
|
nils-werner/keras
|
78f26df8fb2b8aa5c6262aef44a494a8335a9c6e
|
[
"MIT"
] | null | null | null |
tests/keras/utils/generic_utils_test.py
|
nils-werner/keras
|
78f26df8fb2b8aa5c6262aef44a494a8335a9c6e
|
[
"MIT"
] | 21
|
2017-03-27T08:06:11.000Z
|
2020-06-18T09:35:07.000Z
|
import sys
import pytest
from keras.utils.generic_utils import custom_object_scope, has_arg
from keras import activations
from keras import regularizers
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
@pytest.mark.parametrize('fn, name, accept_all, expected', [
('f(x)', 'x', False, True),
('f(x)', 'y', False, False),
('f(x)', 'y', True, False),
('f(x, y)', 'y', False, True),
('f(x, y=1)', 'y', False, True),
('f(x, **kwargs)', 'x', False, True),
('f(x, **kwargs)', 'y', False, False),
('f(x, **kwargs)', 'y', True, True),
('f(x, y=1, **kwargs)', 'y', False, True),
# Keyword-only arguments (Python 3 only)
('f(x, *args, y=1)', 'y', False, True),
('f(x, *args, y=1)', 'z', True, False),
('f(x, *, y=1)', 'x', False, True),
('f(x, *, y=1)', 'y', False, True),
# lambda
(lambda x: x, 'x', False, True),
(lambda x: x, 'y', False, False),
(lambda x: x, 'y', True, False),
])
def test_has_arg(fn, name, accept_all, expected):
if isinstance(fn, str):
context = dict()
try:
exec('def {}: pass'.format(fn), context)
except SyntaxError:
if sys.version_info >= (3,):
raise
pytest.skip('Function is not compatible with Python 2')
context.pop('__builtins__', None) # Sometimes exec adds builtins to the context
fn, = context.values()
assert has_arg(fn, name, accept_all) is expected
@pytest.mark.xfail(sys.version_info < (3, 3),
reason='inspect API does not reveal positional-only arguments')
def test_has_arg_positional_only():
assert has_arg(pow, 'x') is False
if __name__ == '__main__':
pytest.main([__file__])
| 30.686567
| 88
| 0.572471
|
e83ea00c001ad3d2b1e05877b70f433e6a22bea1
| 1,363
|
py
|
Python
|
fuzz/helper.py
|
xumia/debian-openssl
|
f3694133edbc53065100d6cc2250ae6745e63638
|
[
"OpenSSL"
] | 18,396
|
2015-11-11T09:36:37.000Z
|
2022-03-31T23:31:51.000Z
|
fuzz/helper.py
|
xumia/debian-openssl
|
f3694133edbc53065100d6cc2250ae6745e63638
|
[
"OpenSSL"
] | 2,471
|
2015-11-10T04:01:38.000Z
|
2022-03-31T21:37:21.000Z
|
fuzz/helper.py
|
xumia/debian-openssl
|
f3694133edbc53065100d6cc2250ae6745e63638
|
[
"OpenSSL"
] | 4,219
|
2015-11-10T12:17:34.000Z
|
2022-03-31T10:41:43.000Z
|
#!/usr/bin/python
#
# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
"""Fuzzing helper, creates and uses corpus/crash directories.
fuzzer.py <fuzzer> <extra fuzzer arguments>
"""
import os
import subprocess
import sys
FUZZER = sys.argv[1]
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CORPORA_DIR = os.path.abspath(os.path.join(THIS_DIR, "corpora"))
FUZZER_DIR = os.path.abspath(os.path.join(CORPORA_DIR, FUZZER))
if not os.path.isdir(FUZZER_DIR):
os.mkdir(FUZZER_DIR)
corpora = []
def _create(d):
dd = os.path.abspath(os.path.join(CORPORA_DIR, d))
if not os.path.isdir(dd):
os.mkdir(dd)
corpora.append(dd)
def _add(d):
dd = os.path.abspath(os.path.join(CORPORA_DIR, d))
if os.path.isdir(dd):
corpora.append(dd)
def main():
_create(FUZZER)
_create(FUZZER + "-crash")
_add(FUZZER + "-seed")
cmd = ([os.path.abspath(os.path.join(THIS_DIR, FUZZER))] + sys.argv[2:]
+ ["-artifact_prefix=" + corpora[1] + "/"] + corpora)
print(" ".join(cmd))
subprocess.call(cmd)
if __name__ == "__main__":
main()
| 25.716981
| 76
| 0.677916
|
e70e4b50a6610b065c5099e192d010a6c1106b63
| 240
|
py
|
Python
|
xitorch/__init__.py
|
mfkasim91/lintorch
|
7a7da4b960e83c07e45ddb999da99510d3c9e909
|
[
"MIT"
] | 4
|
2020-10-15T15:07:54.000Z
|
2022-01-29T23:01:10.000Z
|
xitorch/__init__.py
|
mfkasim91/lintorch
|
7a7da4b960e83c07e45ddb999da99510d3c9e909
|
[
"MIT"
] | 7
|
2020-09-16T11:44:34.000Z
|
2020-09-24T13:17:19.000Z
|
xitorch/__init__.py
|
mfkasim91/lintorch
|
7a7da4b960e83c07e45ddb999da99510d3c9e909
|
[
"MIT"
] | 2
|
2020-09-17T09:41:33.000Z
|
2020-09-17T10:00:40.000Z
|
from xitorch._core.editable_module import *
from xitorch._core.pure_function import *
from xitorch._core.linop import *
from xitorch.debug.modes import *
from xitorch.version import get_version as _get_version
__version__ = _get_version()
| 30
| 55
| 0.825
|
0e34ef15d5974e6c1eb27689b2411ee481202311
| 2,611
|
py
|
Python
|
src/constants.py
|
BoseSean/db-explain-viz
|
bb2b0fa52558b1ffdb0940cce3a977ccebcae8c3
|
[
"Apache-2.0"
] | 1
|
2018-12-06T08:41:31.000Z
|
2018-12-06T08:41:31.000Z
|
src/constants.py
|
BoseSean/db-explain-viz
|
bb2b0fa52558b1ffdb0940cce3a977ccebcae8c3
|
[
"Apache-2.0"
] | null | null | null |
src/constants.py
|
BoseSean/db-explain-viz
|
bb2b0fa52558b1ffdb0940cce3a977ccebcae8c3
|
[
"Apache-2.0"
] | null | null | null |
from inspect import getmembers, isfunction
NaN = float("NaN")
class Fields:
'''
Fields contain all the possible attribute names of a node in QEP tree
'''
NODE_TYPE = 'Node Type'
ACTUAL_ROWS = 'Actual Rows'
PLAN_ROWS = 'Plan Rows'
ACTUAL_TOTAL_TIME = 'Actual Total Time'
ACTUAL_LOOPS = 'Actual Loops'
ACTUAL_STARTUP_TIME = 'Actual Startup Time'
TOTAL_COST = 'Total Cost'
PLANS = 'Plans'
PLAN = 'Plan'
RELATION_NAME = 'Relation Name'
SCHEMA = 'Schema'
ALIAS = 'Alias'
GROUP_KEY = 'Group Key'
SORT_KEY = 'Sort Key'
JOIN_TYPE = 'Join Type'
INDEX_NAME = 'Index Name'
INDEX_COND = 'Index Cond'
HASH_CONDITION = 'Hash Cond'
CTE_SCAN = "CTE Scan"
CTE_NAME = "CTE Name"
EXECUTION_TIME = 'Execution Time'
COMPUTED_TAGS = '*Tags'
    COSTLIEST_NODE = '*Costliest Node (by cost)'
LARGEST_NODE = '*Largest Node (by rows)'
SLOWEST_NODE = '*Slowest Node (by duration)'
MAXIMUM_COSTS = '*Most Expensive Node (cost)'
MAXIMUM_ROWS = '*Largest Node (rows)'
MAXIMUM_DURATION = '*Slowest Node (time)'
ACTUAL_DURATION = '*Actual Duration'
ACTUAL_COST = '*Actual Cost'
PLANNER_ESTIMATE_FACTOR = '*Planner Row Estimate Factor'
PLANNER_ESIMATE_DIRECTION = '*Planner Row Estimate Direction'
@staticmethod
def as_list():
return list(
map(lambda i: i[1],
filter(lambda i: i[0][0]!='_',
getmembers(Fields, lambda x: not isfunction(x))
)
)
)
class NodeTypes:
'''
All the node types (physical operators) of QEP tree
'''
SEQ_SCAN = 'Seq Scan'
INDEX_SCAN = 'Index Scan'
INDEX_ONLY_SCAN = 'Index Only Scan'
BITMAP_HEAP_SCAN = 'Bitmap Heap Scan'
BITMAP_INDEX_SCAN = 'Bitmap Index Scan'
CTE_SCAN = 'CTE Scan'
HASH_JOIN = 'Hash Join'
MERGE_JOIN = 'Merge Join'
NESTED_LOOP = 'Nested Loop'
AGGREGATE = 'Aggregate'
HASH_AGGREGATE = 'Hash Aggregate'
SORT = 'Sort'
LIMIT = 'Limit'
SCAN_NORMAL_TYPES = [SEQ_SCAN, INDEX_SCAN, INDEX_ONLY_SCAN, BITMAP_HEAP_SCAN]
SCAN_INDEX_TYPES = [BITMAP_INDEX_SCAN]
SCAN_TYPES = SCAN_NORMAL_TYPES + SCAN_INDEX_TYPES
AGGREGATE_TYPES = [AGGREGATE, HASH_AGGREGATE]
class Strings:
TITLE = 'QEP Visualizer'
HIGHLIGHT_LABEL = "highlight.TLabel"
BW_LABLE = "BW.TLabel"
PADDING = "3 3 12 12"
COL_EXE_TIME = 'exe_time'
EXE_TIME_TEXT = 'exe_time / ms'
COL_PERCENTAGE = 'percentage'
PERCENTAGE_TEXT = 'percentage'
ATTRIBUTE_TEXT = 'ATTRIBUTE'
VALUE_TEXT = "VALUE"
| 30.011494
| 81
| 0.641517
|
26fa10bdd7cc2e39aec30800a4bc6cb62c1b0136
| 402
|
py
|
Python
|
radio/migrations/0004_system_recorder_uuid.py
|
MaxwellDPS/trunk-player
|
19f116e64249823f3a12b35ed55252db60b1cf78
|
[
"MIT"
] | null | null | null |
radio/migrations/0004_system_recorder_uuid.py
|
MaxwellDPS/trunk-player
|
19f116e64249823f3a12b35ed55252db60b1cf78
|
[
"MIT"
] | 7
|
2021-06-10T23:24:05.000Z
|
2022-03-03T21:48:12.000Z
|
radio/migrations/0004_system_recorder_uuid.py
|
MaxwellDPS/trunk-player
|
19f116e64249823f3a12b35ed55252db60b1cf78
|
[
"MIT"
] | 1
|
2022-03-26T07:04:21.000Z
|
2022-03-26T07:04:21.000Z
|
# Generated by Django 3.2.7 on 2021-09-19 16:52
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('radio', '0003_defaultagenecy'),
]
operations = [
migrations.AddField(
model_name='system',
name='recorder_uuid',
field=models.UUIDField(default=uuid.uuid4),
),
]
| 20.1
| 55
| 0.606965
|
031fcbc56203bf898e89b17a9ff3683514a33b46
| 26,957
|
py
|
Python
|
simple_rl/agents/func_approx/sam_stuff/main.py
|
samlobel/simple_rl_mbrl
|
ed868916d06dbf68f4af23bea83b0e852e88df6e
|
[
"Apache-2.0"
] | 1
|
2020-01-30T13:14:24.000Z
|
2020-01-30T13:14:24.000Z
|
simple_rl/agents/func_approx/sam_stuff/main.py
|
samlobel/simple_rl_mbrl
|
ed868916d06dbf68f4af23bea83b0e852e88df6e
|
[
"Apache-2.0"
] | null | null | null |
simple_rl/agents/func_approx/sam_stuff/main.py
|
samlobel/simple_rl_mbrl
|
ed868916d06dbf68f4af23bea83b0e852e88df6e
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import random
from collections import namedtuple, deque, defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pdb
from copy import deepcopy
import shutil
import os
import time
import argparse
import pickle
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from simple_rl.agents.AgentClass import Agent
from simple_rl.agents.func_approx.ddpg.utils import compute_gradient_norm
# from simple_rl.agents.func_approx.sam_stuff.replay_buffer import ReplayBuffer
from simple_rl.agents.func_approx.sam_stuff.model import ConvQNetwork, DenseQNetwork
from simple_rl.agents.func_approx.sam_stuff.epsilon_schedule import *
from simple_rl.tasks.gym.GymMDPClass import GymMDP
from simple_rl.tasks.lunar_lander.LunarLanderMDPClass import LunarLanderMDP
# from simple_rl.agents.func_approx.sam_stuff.RandomNetworkDistillationClass import RNDModel, RunningMeanStd
from simple_rl.agents.func_approx.sam_stuff.RandomNetworkDistillationClass import RunningMeanStd
from simple_rl.agents.func_approx.sam_stuff.DQNAgentClass import DQNAgent
from simple_rl.agents.func_approx.sam_stuff.DQNAgentClass import WorldModel
from simple_rl.agents.func_approx.sam_stuff.DQNAgentClass import OnlineComposer
from simple_rl.agents.func_approx.sam_stuff.ModelQNetworkComposer import Composer
NUM_EPISODES = 3500
NUM_STEPS = 10000
# def test_forward_pass(dqn_agent, mdp):
# # load the weights from file
# mdp.reset()
# state = deepcopy(mdp.init_state)
# overall_reward = 0.
# mdp.render = True
# while not state.is_terminal():
# action = dqn_agent.act(state.features(), train_mode=False)
# reward, next_state = mdp.execute_agent_action(action)
# overall_reward += reward
# state = next_state
# mdp.render = False
# return overall_reward
def show_video(dqn_agent, mdp):
# load the weights from file
mdp.reset()
state = deepcopy(mdp.init_state)
overall_reward = 0.
mdp.render = True
while not state.is_terminal():
action = dqn_agent.act(state.features(), train_mode=False)
reward, next_state = mdp.execute_agent_action(action)
overall_reward += reward
state = next_state
mdp.render = False
return overall_reward
def save_all_scores(experiment_name, log_dir, seed, scores):
print("\rSaving training and validation scores..")
training_scores_file_name = "{}_{}_training_scores.pkl".format(experiment_name, seed)
if log_dir:
training_scores_file_name = os.path.join(log_dir, training_scores_file_name)
with open(training_scores_file_name, "wb+") as _f:
pickle.dump(scores, _f)
def create_log_dir(experiment_name):
path = os.path.join(os.getcwd(), "logs", experiment_name)
try:
os.mkdir(path)
except OSError:
print("Creation of the directory %s failed" % path)
else:
print("Successfully created the directory %s " % path)
return path
def test_render(agent, mdp):
while True:
print("Press ctrl-C to quit")
mdp.set_render(True)
mdp.reset()
state = mdp.init_state
while True:
# action = agent.act(state.features(), train_mode=False)
action = agent.get_best_action(state.features())
reward, next_state = mdp.execute_agent_action(action)
state = next_state
game_over = mdp.game_over if hasattr(mdp, 'game_over') else False
if state.is_terminal() or game_over:
print('bye bye')
break
def collect_data_for_bias_variance_calculation(mdp, q_agent, num_runs):
"""
Runs on-policy, and just makes the data that we'll pass to the composer.
"""
exp = namedtuple("Experience", field_names=["state","action","reward","next_state", "done", "time_limit_truncated"])
experiences = []
states = []
actions = []
rewards = []
next_states = []
dones = []
time_limit_truncateds = []
for _ in range(num_runs):
mdp.reset()
state = deepcopy(mdp.init_state)
state = np.asarray(state.features())
true_finish = False
while True:
# action = agent.act(state.features(), train_mode=True)
# reward, next_state = mdp.execute_agent_action(action)
            action = q_agent.get_best_action(state)
reward, next_state = mdp.execute_agent_action(action)
# is_terminal = next_state.is_terminal()
# time_limit_truncated = next_state.is_time_limit_truncated()
experiences.append(
exp(state=state,
action=action,
reward=reward,
next_state=np.asarray(next_state.features()),
done=next_state.is_terminal(),
time_limit_truncated=next_state.is_time_limit_truncated()
))
states.append(state)
actions.append(action)
rewards.append(reward)
next_states.append(np.asarray(next_state.features()))
dones.append(next_state.is_terminal())
time_limit_truncateds.append(next_state.is_time_limit_truncated())
game_over = mdp.game_over if hasattr(mdp, 'game_over') else False
if game_over and not next_state.is_terminal():
print('howza')
# import ipdb; ipdb.set_trace()
raise Exception("Honestly, we're just not dealing with this well here.")
if next_state.is_terminal():
break
state = np.asarray(next_state.features())
return experiences
# return dict(
# states=states,
# actions=actions,
# rewards=rewards,
# next_states=next_states,
# dones=dones,
# time_limit_truncateds=time_limit_truncateds,
# )
pass
class Evaluator:
def __init__(self, mdp, composer, num_runs_each=1, rollout_depth=5, lambdas_to_test=None, logdir="."):
self.mdp = mdp
self.composer = composer
self.num_runs_each = num_runs_each
self.rollout_depth = rollout_depth
self.logdir = logdir
self._bias = None
self._variance = None
if lambdas_to_test is None:
self.lambdas_to_test = [0.0, 0.5, 1.0]
else:
self.lambdas_to_test = lambdas_to_test
self.results = defaultdict(list)
def _set_bias_variance(self, num_runs_to_collect_over):
data = collect_data_for_bias_variance_calculation(self.mdp, self.composer.q_agent, num_runs_to_collect_over)
# bias, variance = self.composer.create_bias_variance_from_data(data, self.rollout_depth)
bias, variance, covariance = self.composer.create_bias_variance_covariance_from_data(data, self.rollout_depth)
# print("This is about to be mega self-defeating...")
# self._bias = np.zeros((self.rollout_depth,), dtype=np.float32)
# self._variance = np.ones((self.rollout_depth,), dtype=np.float32)
# self._variance[0] -= 0.999
# self._variance *= 1000
# print("self, defeated")
self._bias = bias
self._variance = variance
self._covariance = covariance
print(f"Bias: {bias}\nVariance: {variance}")
print(f"Covariance: {covariance}")
def evaluate_different_models(self, *, training_steps):
"""
This does the evaluation, prints out results, but then importantly
populates some storage list, which we can then use to make plots.
"""
assert self._bias is not None
assert self._variance is not None
lambdas_to_test = self.lambdas_to_test
# print(self.lambdas_to_test)
mdp = self.mdp
composer = self.composer
num_runs_each = self.num_runs_each
rollout_depth = self.rollout_depth
# lambdas_to_test.reverse()
# funcs = []
print("TODO: I know that it's a scoping and reference problem. Maybe use partials?")
# There's a really annoying referencing problem here. Let's see how it goes.
funcs = [(lam, (lambda l: lambda s: composer.get_best_action_td_lambda(s, rollout_depth, gamma=0.99, lam=l))(lam))
for lam in lambdas_to_test]
# print(funcs)
funcs.append(("OptimalVariance",
lambda s: composer.get_best_action_for_bias_variance(s, rollout_depth, self._bias, self._variance, gamma=0.99)))
funcs.append(("OptimalCovariance",
lambda s: composer.get_best_action_for_bias_covariance(s, rollout_depth, self._bias, self._covariance, gamma=0.99)))
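        # (added note) The (lambda l: ...)(lam) wrapper above is needed because Python
        # closures bind names late: a plain `lambda s: ... lam ...` inside the list
        # comprehension would see only the final value of `lam` for every entry.
        # functools.partial would be the usual alternative, as the TODO suggests.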
# for lam in lambdas_to_test:
for key, func in funcs:
all_rewards = []
for _ in range(num_runs_each):
mdp.reset()
state = deepcopy(mdp.init_state)
state = np.asarray(state.features())
reward_so_far = 0.0
while True:
# state = torch.from_numpy(state).float().unsqueeze(0).to("cuda")
# action = composer.get_best_action_td_lambda(state, rollout_depth, gamma=0.99, lam=lam)
action = func(state)
# print(action)
reward, next_state = mdp.execute_agent_action(action)
reward_so_far += reward
game_over = mdp.game_over if hasattr(mdp, 'game_over') else False
if next_state.is_terminal() or game_over:
break
state = np.asarray(next_state.features())
self.results[key].append((training_steps, reward_so_far))
all_rewards.append(reward_so_far)
all_rewards = np.asarray(all_rewards)
print(f"{num_runs_each} runs: Key={key}, Reward={np.mean(all_rewards)} ({np.std(all_rewards)})")
print(all_rewards)
def write_graphs(self):
plt.figure()
for lam, vals in self.results.items():
xs, ys = zip(*vals)
ax = sns.lineplot(x=xs, y=ys, label=f"Lam={lam}")
plt.savefig(os.path.join(self.logdir, "results.png"))
# plt.show()
plt.clf()
# def evaluate_different_models(mdp, composer, num_runs_each=1, training_steps=None):
# # Somehow I want to also graph this... How should I do that?
# # I could make this a class, and keep track of past things. But that does
# # seem heavy-handed. How about I start by just printing them out...
# lambdas_to_test = [0.0, 0.5, 1.0]
# rollout_depth = 5
# for lam in lambdas_to_test:
# all_rewards = []
# for _ in range(num_runs_each):
# mdp.reset()
# state = deepcopy(mdp.init_state)
# state = np.asarray(state.features())
# reward_so_far = 0.0
# while True:
# # state = torch.from_numpy(state).float().unsqueeze(0).to("cuda")
# action = composer.get_best_action_td_lambda(state, rollout_depth, gamma=0.99, lam=lam)
# reward, next_state = mdp.execute_agent_action(action)
# reward_so_far += reward
# game_over = mdp.game_over if hasattr(mdp, 'game_over') else False
# if next_state.is_terminal() or game_over:
# break
# state = np.asarray(next_state.features())
# all_rewards.append(reward_so_far)
# all_rewards = np.asarray(all_rewards)
# print(f"{num_runs_each} runs: Lam={lam}, Reward={np.mean(all_rewards)} ({np.std(all_rewards)})")
# print(all_rewards)
def test_optimal(agent, mdp, num_episodes=1):
# Going to return a total reward...
scores = []
for _ in range(num_episodes):
score = 0
mdp.reset()
state = deepcopy(mdp.init_state)
while True:
action = agent.get_best_action(state.features())
qvalues = agent.get_qvalues(state.features())
# print(action)
# print(qvalues)
# print(state.features())
reward, next_state = mdp.execute_agent_action(action)
score += reward
state = next_state
game_over = mdp.game_over if hasattr(mdp, 'game_over') else False
if state.is_terminal() or game_over:
break
scores.append(score)
average_score = np.mean(scores)
print(f"score is {average_score}")
return average_score
def train(agent, mdp, episodes, steps, init_episodes=10, evaluate_every=25, *, save_every, logdir, world_model, composer):
model_save_loc = os.path.join(logdir, 'model.tar')
per_episode_scores = []
last_10_scores = deque(maxlen=100)
iteration_counter = 0
state_ri_buffer = []
# Observation and reward normalization
reward_rms = RunningMeanStd()
obs_rms = RunningMeanStd(shape=(1, 84, 84))
last_save = time.time()
## Commenting this out for now while we switch to something more reasonable.
if composer:
evaluator = Evaluator(mdp, composer, num_runs_each=5, rollout_depth=5, logdir=logdir)
for episode in range(episodes):
if evaluate_every > 0 and episode % evaluate_every == 0 and episode != 0:
print(f"Evaluating on episode {episode}")
test_optimal(agent, mdp)
# test_optimal(composer.q_agent, mdp)
# test_optimal(agent, mdp)
# print("just kidding")
# evaluator._set_bias_variance(10)
# if composer:
# print("Shouldn't be here?")
# evaluator._set_bias_variance(10)
# evaluator.evaluate_different_models(training_steps=episode)
# print("At some point definitely make this a CL-Arg")
# evaluator.write_graphs()
if time.time() - last_save > save_every:
print("Saving Model")
last_save = time.time()
torch.save(agent.state_dict(), model_save_loc)
mdp.reset()
state = deepcopy(mdp.init_state)
observation_buffer = []
init_features = np.asarray(mdp.init_state.features())
if len(init_features.shape) == 3:
init_observation = init_features[-1, :, :]
assert init_observation.shape == (84, 84), init_observation.shape
else:
init_observation = init_features
#### FROM AKHIL
# init_observation = np.array(mdp.init_state.features())[-1, :, :]
# assert init_observation.shape == (84, 84), init_observation.shape
observation_buffer.append(init_observation)
score = 0.
while True:
iteration_counter += 1
action = agent.act(state.features(), train_mode=True)
reward, next_state = mdp.execute_agent_action(action)
agent.step(state.features(), action, reward, next_state.features(), next_state.is_terminal(),
num_steps=1, time_limit_truncated=next_state.is_time_limit_truncated())
agent.update_epsilon()
if world_model:
world_model.step(state.features(), action, reward, next_state.features(), next_state.is_terminal(),
num_steps=1, time_limit_truncated=next_state.is_time_limit_truncated())
state = next_state
score += reward
game_over = mdp.game_over if hasattr(mdp, 'game_over') else False
if state.is_terminal() or game_over:
if agent.tensor_log:
print("Is this happening too?")
agent.writer.add_scalar("Score", score, episode)
break
last_10_scores.append(score)
per_episode_scores.append(score)
print('\rEpisode {}\tAverage Score: {:.2f}\tEpsilon: {:.2f}'.format(episode, np.mean(last_10_scores), agent.epsilon), end="")
if episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}\tEpsilon: {:.2f}'.format(episode, np.mean(last_10_scores), agent.epsilon))
return per_episode_scores, state_ri_buffer
def bayes_functional(*, mdp, args):
"""
This will like do the setup and stuff, and then return a singular number at the end.
We would like this to return a function that has all the constants filled in.
Because bayes_opt doesn't seem to have a good way of passing the same thing to
everyone...
"""
def functional(lr_exp, tau_exp):
print(f"Running for {lr_exp} {tau_exp}")
state_dim = overall_mdp.env.observation_space.shape if args.pixel_observation else overall_mdp.env.observation_space.shape[0]
action_dim = len(overall_mdp.actions)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
# We're going to pass in something like lr=4, and it'll translate it to 10^-4
# And we'll bound at 0 and 5 or something.
lr = 10**-lr_exp
tau = 10**-tau_exp
print(f"Running for lr_exp={lr_exp} tau_exp={tau_exp}")
print(f"AKA lr={lr} tau={tau}")
ddqn_agent = DQNAgent(state_size=state_dim, action_size=action_dim,
seed=args.seed, device=device,
name="GlobalDDQN", lr=lr, tau=tau, tensor_log=args.tensor_log, use_double_dqn=True,
exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,
evaluation_epsilon=args.eval_eps,
epsilon_linear_decay=args.epsilon_linear_decay,
use_softmax_target=args.use_softmax_target)
world_model = WorldModel(state_size=state_dim, action_size=action_dim,
seed=args.seed, device=device,
name="WorldModel", lr=lr, tensor_log=args.tensor_log,# use_double_dqn=True,
writer = ddqn_agent.writer, # Because I'm concerned it's over-writing...
#exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,
#evaluation_epsilon=args.eval_eps,
#epsilon_linear_decay=args.epsilon_linear_decay
)
composer = Composer(
q_agent=ddqn_agent,
world_model=world_model,
action_size=action_dim,
device=device)
train(
ddqn_agent, overall_mdp, args.episodes, args.steps,
save_every=args.save_every, logdir=logdir, world_model=world_model,
composer=composer,
evaluate_every=0)
print("Boom, training complete. Now testing optimal!")
val = test_optimal(ddqn_agent, mdp, num_episodes=25)
return val
return functional
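# (added) Sketch of an alternative using functools.partial (hypothetical `objective`
# signature; bayes_opt only varies the keys listed in `pbounds`, so frozen keyword
# arguments ride along unchanged):
#     from functools import partial
#     def objective(lr_exp, tau_exp, mdp=None, args=None): ...
#     f = partial(objective, mdp=overall_mdp, args=args)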
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--experiment_name", type=str, help="Experiment Name")
parser.add_argument("--seed", type=int, help="Random seed for this run (default=0)", default=0)
parser.add_argument("--episodes", type=int, help="# episodes", default=NUM_EPISODES)
parser.add_argument("--steps", type=int, help="# steps", default=NUM_STEPS)
parser.add_argument("--render", type=bool, help="Render the mdp env", default=False)
parser.add_argument("--pixel_observation", action='store_true', help="Images / Dense input", default=False)
parser.add_argument("--exploration_method", type=str, default="eps-greedy")
parser.add_argument("--eval_eps", type=float, default=0.05)
parser.add_argument("--tensor_log", default=False, action='store_true', help="Include this option if you want logging.")
parser.add_argument("--env", type=str, default="Acrobot-v1")
parser.add_argument("--save_every", type=int, help="Save every n seconds", default=60)
parser.add_argument("--mode", type=str, help="'train' or 'view'", default='train')
parser.add_argument("--epsilon_linear_decay", type=int, help="'train' or 'view'", default=100000)
parser.add_argument("--use_softmax_target", default=False, action='store_true', help='When calculating backups, do you use the max or the softmax?')
parser.add_argument("--learning_rate", default=1e-3, type=float, help='What do you think!')
parser.add_argument("--tau", default=1e-3, type=float, help='Target copying rate')
parser.add_argument("--evaluate_every", default=25, type=int, help='Expensive evaluation step for tracking')
parser.add_argument("--use_online_composer", default=False, action="store_true", help='If you include this option, the model is used to make more accurate Q updates')
parser.add_argument("--num_rollouts", default=5, type=int, help='Only used if use_online_composer')
# parser.add_argument("--use_world_model", default=False, action='store_true', help="Include this option if you want to see how a world model trains.")
args = parser.parse_args()
logdir = create_log_dir(args.experiment_name)
model_save_loc = os.path.join(logdir, 'model.tar')
# learning_rate = 1e-3 # 0.00025 for pong
overall_mdp = GymMDP(env_name=args.env, pixel_observation=args.pixel_observation, render=args.render,
clip_rewards=False, term_func=None, seed=args.seed)
### THIS ONE WORKS FINE SO LONG AS YOU HAVE PIXEL OBSERVATIONS ####
# overall_mdp = GymMDP(env_name="MontezumaRevengeNoFrameskip-v0", pixel_observation=args.pixel_observation, render=args.render,
# clip_rewards=False, term_func=None, seed=args.seed)
### END ###
# overall_mdp = GymMDP(env_name="MontezumaRevengeNoFrameskip-v4", pixel_observation=args.pixel_observation, render=args.render,
# clip_rewards=False, term_func=None, seed=args.seed)
# overall_mdp = GymMDP(env_name="CartPole-v0", pixel_observation=args.pixel_observation, render=args.render,
# clip_rewards=False, term_func=None, seed=args.seed)
# overall_mdp = LunarLanderMDP(render=args.render, seed=args.seed)
state_dim = overall_mdp.env.observation_space.shape if args.pixel_observation else overall_mdp.env.observation_space.shape[0]
action_dim = len(overall_mdp.actions)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
if args.use_online_composer:
world_model = WorldModel(state_size=state_dim, action_size=action_dim,
seed=args.seed, device=device,
name="WorldModel", lr=args.learning_rate, tensor_log=args.tensor_log,# use_double_dqn=True,
# writer = agent.writer, # Because I'm concerned it's over-writing...
#exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,
#evaluation_epsilon=args.eval_eps,
#epsilon_linear_decay=args.epsilon_linear_decay
)
agent = OnlineComposer(
world_model=world_model, num_rollouts=args.num_rollouts,
state_size=state_dim, action_size=action_dim,
seed=args.seed, device=device,
name="OnlineComposer",
mixing_speed=0.9999,
lr=args.learning_rate, tau=args.tau,
tensor_log=args.tensor_log, use_double_dqn=True,
            writer = world_model.writer, # Because I'm concerned it's over-writing.
exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,
evaluation_epsilon=args.eval_eps,
epsilon_linear_decay=args.epsilon_linear_decay,
use_softmax_target=args.use_softmax_target)
world_model = None
composer = None
else:
agent = DQNAgent(state_size=state_dim, action_size=action_dim,
seed=args.seed, device=device,
name="GlobalDDQN",
lr=args.learning_rate, tau=args.tau,
tensor_log=args.tensor_log, use_double_dqn=True,
exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,
evaluation_epsilon=args.eval_eps,
epsilon_linear_decay=args.epsilon_linear_decay,
use_softmax_target=args.use_softmax_target)
world_model = WorldModel(state_size=state_dim, action_size=action_dim,
seed=args.seed, device=device,
name="WorldModel", lr=args.learning_rate, tensor_log=args.tensor_log,# use_double_dqn=True,
writer = agent.writer, # Because I'm concerned it's over-writing...
#exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,
#evaluation_epsilon=args.eval_eps,
#epsilon_linear_decay=args.epsilon_linear_decay
)
composer = Composer(
q_agent=agent,
world_model=world_model,
action_size=action_dim,
device=device)
# data = collect_data_for_bias_variance_calculation(overall_mdp, ddqn_agent, 1)
# bias, variance = composer.create_bias_variance_from_data(data, 5)
if args.mode == 'train':
ddqn_episode_scores, s_ri_buffer = train(
agent, overall_mdp, args.episodes, args.steps, save_every=args.save_every, logdir=logdir, world_model=world_model,
composer=composer, evaluate_every=args.evaluate_every)
save_all_scores(args.experiment_name, logdir, args.seed, ddqn_episode_scores)
elif args.mode == 'view':
print('waow')
print(model_save_loc)
agent.load_state_dict(torch.load(model_save_loc))
test_render(agent, overall_mdp)
pass
elif args.mode == 'hyper':
from bayes_opt import BayesianOptimization
f = bayes_functional(mdp=overall_mdp, args=args)
pbounds = {'lr_exp': (1, 5), 'tau_exp': (1,5)}
optimizer = BayesianOptimization(
f=f,
pbounds=pbounds,
random_state=1,
)
optimizer.maximize(
init_points=5,
n_iter=10,
)
print(optimizer.max)
for i, res in enumerate(optimizer.res):
print("Iteration {}: \n\t{}".format(i, res))
import pdb; pdb.set_trace()
print('bingester')
else:
raise Exception("HEELLOOO")
| 41.472308
| 170
| 0.63171
|
cbab773b5151b5c4bd0aeda45af0b629d25ed47a
| 1,317
|
py
|
Python
|
benchmark/backup_googlenet.py
|
Oneflow-Inc/vision
|
352e9240f63118112ea174bb2d0b502fa54be16f
|
[
"BSD-3-Clause"
] | 40
|
2021-10-19T02:34:56.000Z
|
2022-03-25T07:49:44.000Z
|
benchmark/backup_googlenet.py
|
Oneflow-Inc/vision
|
352e9240f63118112ea174bb2d0b502fa54be16f
|
[
"BSD-3-Clause"
] | 53
|
2021-10-22T02:24:44.000Z
|
2022-03-31T04:20:47.000Z
|
benchmark/backup_googlenet.py
|
Oneflow-Inc/vision
|
352e9240f63118112ea174bb2d0b502fa54be16f
|
[
"BSD-3-Clause"
] | 11
|
2022-01-06T02:57:07.000Z
|
2022-03-23T15:19:51.000Z
|
from benchmark import *
import oneflow_benchmark
from flowvision.models.googlenet import googlenet
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_googlenet_batch_size1(benchmark, net=googlenet, input_shape=[1, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_googlenet_batch_size2(benchmark, net=googlenet, input_shape=[2, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_googlenet_batch_size4(benchmark, net=googlenet, input_shape=[4, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_googlenet_batch_size8(benchmark, net=googlenet, input_shape=[8, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_googlenet_batch_size16(
benchmark, net=googlenet, input_shape=[16, 3, 224, 224]
):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
| 36.583333
| 87
| 0.7388
|
ad249aad4b58c47c0bc3495f7074208912e483bd
| 26,507
|
py
|
Python
|
lib/googlecloudsdk/third_party/apis/artifactregistry/v1alpha1/artifactregistry_v1alpha1_client.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/third_party/apis/artifactregistry/v1alpha1/artifactregistry_v1alpha1_client.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/artifactregistry/v1alpha1/artifactregistry_v1alpha1_client.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
"""Generated client library for artifactregistry version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.artifactregistry.v1alpha1 import artifactregistry_v1alpha1_messages as messages
class ArtifactregistryV1alpha1(base_api.BaseApiClient):
"""Generated client library for service artifactregistry version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://artifactregistry.googleapis.com/'
MTLS_BASE_URL = 'https://artifactregistry.mtls.googleapis.com/'
_PACKAGE = 'artifactregistry'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only']
_VERSION = 'v1alpha1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'ArtifactregistryV1alpha1'
_URL_VERSION = 'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new artifactregistry handle."""
url = url or self.BASE_URL
super(ArtifactregistryV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations_repositories_aptArtifacts = self.ProjectsLocationsRepositoriesAptArtifactsService(self)
self.projects_locations_repositories_aptartifacts = self.ProjectsLocationsRepositoriesAptartifactsService(self)
self.projects_locations_repositories_gooGetArtifacts = self.ProjectsLocationsRepositoriesGooGetArtifactsService(self)
self.projects_locations_repositories_googetartifacts = self.ProjectsLocationsRepositoriesGoogetartifactsService(self)
self.projects_locations_repositories_yumArtifacts = self.ProjectsLocationsRepositoriesYumArtifactsService(self)
self.projects_locations_repositories_yumartifacts = self.ProjectsLocationsRepositoriesYumartifactsService(self)
self.projects_locations_repositories = self.ProjectsLocationsRepositoriesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (ArtifactregistryProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='artifactregistry.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='ArtifactregistryProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsRepositoriesAptArtifactsService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories_aptArtifacts resource."""
_NAME = 'projects_locations_repositories_aptArtifacts'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesAptArtifactsService, self).__init__(client)
self._upload_configs = {
}
def Import(self, request, global_params=None):
r"""Imports Apt artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesAptArtifactsImportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Import')
return self._RunMethod(
config, request, global_params=global_params)
Import.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/aptArtifacts:import',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.aptArtifacts.import',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/aptArtifacts:import',
request_field='googleDevtoolsArtifactregistryV1alpha1ImportAptArtifactsRequest',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesAptArtifactsImportRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsRepositoriesAptartifactsService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories_aptartifacts resource."""
_NAME = 'projects_locations_repositories_aptartifacts'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesAptartifactsService, self).__init__(client)
self._upload_configs = {
'Upload': base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=None,
resumable_path=None,
simple_multipart=True,
simple_path='/upload/v1alpha1/{+parent}/aptArtifacts:create',
),
}
def Upload(self, request, global_params=None, upload=None):
r"""Directly uploads an Apt artifact. The returned Operation will complete once the resources are uploaded. Package, Version, and File resources are created based on the imported artifact. Imported artifacts that conflict with existing resources are ignored.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesAptartifactsUploadRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
Returns:
(GoogleDevtoolsArtifactregistryV1alpha1UploadAptArtifactMediaResponse) The response message.
"""
config = self.GetMethodConfig('Upload')
upload_config = self.GetUploadConfig('Upload')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config)
Upload.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/aptArtifacts:create',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.aptartifacts.upload',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/aptArtifacts:create',
request_field='googleDevtoolsArtifactregistryV1alpha1UploadAptArtifactRequest',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesAptartifactsUploadRequest',
response_type_name='GoogleDevtoolsArtifactregistryV1alpha1UploadAptArtifactMediaResponse',
supports_download=False,
)
class ProjectsLocationsRepositoriesGooGetArtifactsService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories_gooGetArtifacts resource."""
_NAME = 'projects_locations_repositories_gooGetArtifacts'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesGooGetArtifactsService, self).__init__(client)
self._upload_configs = {
}
def Import(self, request, global_params=None):
r"""Imports GooGet artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesGooGetArtifactsImportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Import')
return self._RunMethod(
config, request, global_params=global_params)
Import.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/gooGetArtifacts:import',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.gooGetArtifacts.import',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/gooGetArtifacts:import',
request_field='googleDevtoolsArtifactregistryV1alpha1ImportGooGetArtifactsRequest',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesGooGetArtifactsImportRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsRepositoriesGoogetartifactsService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories_googetartifacts resource."""
_NAME = 'projects_locations_repositories_googetartifacts'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesGoogetartifactsService, self).__init__(client)
self._upload_configs = {
'Upload': base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=None,
resumable_path=None,
simple_multipart=True,
simple_path='/upload/v1alpha1/{+parent}/googetArtifacts:create',
),
}
def Upload(self, request, global_params=None, upload=None):
r"""Directly uploads a GooGet artifact. The returned Operation will complete once the resources are uploaded. Package, Version, and File resources are created based on the imported artifact. Imported artifacts that conflict with existing resources are ignored.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesGoogetartifactsUploadRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
Returns:
(GoogleDevtoolsArtifactregistryV1alpha1UploadGooGetArtifactMediaResponse) The response message.
"""
config = self.GetMethodConfig('Upload')
upload_config = self.GetUploadConfig('Upload')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config)
Upload.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/googetArtifacts:create',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.googetartifacts.upload',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/googetArtifacts:create',
request_field='googleDevtoolsArtifactregistryV1alpha1UploadGooGetArtifactRequest',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesGoogetartifactsUploadRequest',
response_type_name='GoogleDevtoolsArtifactregistryV1alpha1UploadGooGetArtifactMediaResponse',
supports_download=False,
)
class ProjectsLocationsRepositoriesYumArtifactsService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories_yumArtifacts resource."""
_NAME = 'projects_locations_repositories_yumArtifacts'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesYumArtifactsService, self).__init__(client)
self._upload_configs = {
}
def Import(self, request, global_params=None):
r"""Imports Yum (RPM) artifacts. The returned Operation will complete once the resources are imported. Package, Version, and File resources are created based on the imported artifacts. Imported artifacts that conflict with existing resources are ignored.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesYumArtifactsImportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Import')
return self._RunMethod(
config, request, global_params=global_params)
Import.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/yumArtifacts:import',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.yumArtifacts.import',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/yumArtifacts:import',
request_field='googleDevtoolsArtifactregistryV1alpha1ImportYumArtifactsRequest',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesYumArtifactsImportRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsRepositoriesYumartifactsService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories_yumartifacts resource."""
_NAME = 'projects_locations_repositories_yumartifacts'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesYumartifactsService, self).__init__(client)
self._upload_configs = {
'Upload': base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=None,
resumable_path=None,
simple_multipart=True,
simple_path='/upload/v1alpha1/{+parent}/yumArtifacts:create',
),
}
def Upload(self, request, global_params=None, upload=None):
r"""Directly uploads a Yum artifact. The returned Operation will complete once the resources are uploaded. Package, Version, and File resources are created based on the imported artifact. Imported artifacts that conflict with existing resources are ignored.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesYumartifactsUploadRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
Returns:
(GoogleDevtoolsArtifactregistryV1alpha1UploadYumArtifactMediaResponse) The response message.
"""
config = self.GetMethodConfig('Upload')
upload_config = self.GetUploadConfig('Upload')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config)
Upload.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/yumArtifacts:create',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.yumartifacts.upload',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/yumArtifacts:create',
request_field='googleDevtoolsArtifactregistryV1alpha1UploadYumArtifactRequest',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesYumartifactsUploadRequest',
response_type_name='GoogleDevtoolsArtifactregistryV1alpha1UploadYumArtifactMediaResponse',
supports_download=False,
)
class ProjectsLocationsRepositoriesService(base_api.BaseApiService):
"""Service class for the projects_locations_repositories resource."""
_NAME = 'projects_locations_repositories'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsRepositoriesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a repository. The returned Operation will finish once the repository has been created. Its response will be the created Repository.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories',
http_method='POST',
method_id='artifactregistry.projects.locations.repositories.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['repositoryId'],
relative_path='v1alpha1/{+parent}/repositories',
request_field='googleDevtoolsArtifactregistryV1alpha1Repository',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a repository and all of its contents. The returned Operation will finish once the repository has been deleted. It will not have any Operation metadata and will return a google.protobuf.Empty response.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}',
http_method='DELETE',
method_id='artifactregistry.projects.locations.repositories.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a repository.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleDevtoolsArtifactregistryV1alpha1Repository) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}',
http_method='GET',
method_id='artifactregistry.projects.locations.repositories.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesGetRequest',
response_type_name='GoogleDevtoolsArtifactregistryV1alpha1Repository',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists repositories.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleDevtoolsArtifactregistryV1alpha1ListRepositoriesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories',
http_method='GET',
method_id='artifactregistry.projects.locations.repositories.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/repositories',
request_field='',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesListRequest',
response_type_name='GoogleDevtoolsArtifactregistryV1alpha1ListRepositoriesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates a repository.
Args:
request: (ArtifactregistryProjectsLocationsRepositoriesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleDevtoolsArtifactregistryV1alpha1Repository) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}',
http_method='PATCH',
method_id='artifactregistry.projects.locations.repositories.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1alpha1/{+name}',
request_field='googleDevtoolsArtifactregistryV1alpha1Repository',
request_type_name='ArtifactregistryProjectsLocationsRepositoriesPatchRequest',
response_type_name='GoogleDevtoolsArtifactregistryV1alpha1Repository',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets information about a location.
Args:
request: (ArtifactregistryProjectsLocationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Location) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='artifactregistry.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='ArtifactregistryProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (ArtifactregistryProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations',
http_method='GET',
method_id='artifactregistry.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+name}/locations',
request_field='',
request_type_name='ArtifactregistryProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(ArtifactregistryV1alpha1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
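# Editor's note: a minimal, hedged usage sketch (not part of the generated file).
# The request field names below are assumptions, and application default
# credentials must be available for the client to authenticate.
#
#     client = ArtifactregistryV1alpha1()
#     req = messages.ArtifactregistryProjectsLocationsRepositoriesListRequest(
#         parent='projects/PROJECT/locations/us')
#     resp = client.projects_locations_repositories.List(req)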
| 46.832155
| 266
| 0.732674
|
fd95a40cac16b245badc74e58480545ea1139f70
| 7,760
|
py
|
Python
|
xfraud/supplement/07Learning_hybrid/ours_learn-grid-A.py
|
eBay/xFraud
|
55525c472f40c452b6fb3508f4f8b273af596081
|
[
"Apache-2.0"
] | 17
|
2021-11-04T12:49:28.000Z
|
2022-03-27T14:55:14.000Z
|
xfraud/supplement/07Learning_hybrid/ours_learn-grid-A.py
|
eBay/xFraud
|
55525c472f40c452b6fb3508f4f8b273af596081
|
[
"Apache-2.0"
] | 1
|
2022-03-12T01:03:11.000Z
|
2022-03-12T01:03:11.000Z
|
xfraud/supplement/07Learning_hybrid/ours_learn-grid-A.py
|
eBay/xFraud
|
55525c472f40c452b6fb3508f4f8b273af596081
|
[
"Apache-2.0"
] | 1
|
2022-03-01T08:06:49.000Z
|
2022-03-01T08:06:49.000Z
|
#!/usr/bin/env python
# coding: utf-8
# - Edge weights are inferred by GNNExplainer and node importance scores are given by five eBay annotators. Not every annotator has annotated every node.
# - Seed is the txn to explain.
# - id is the community id.
import math
from tqdm.auto import tqdm
import random
import pandas as pd
import numpy as np
import networkx as nx
import itertools
from collections import Counter
import scipy.stats
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--random-draw', action = 'store', dest = 'random_draw', type = int, default = 100, help = 'Random draws to break the tie in ranking topk edges.')
parser.add_option('--edge-agg', action = 'store', dest = 'edge_agg', default = 'avg', choices = ['avg', 'min', 'sum'], help = 'Aggregation method to compute edge importance score based on the node importance scores.')
# parser.add_option('--explainer-w', action = 'store', dest = 'explainer_w', default = '0', type = float, help = 'Learned parameter for explainer weights.')
parser.add_option('-c', '--centrality-w', action = 'store', dest = 'centrality_w', default = '0', type = float, help = 'Learned parameter for centrality measures.')
(options, args) = parser.parse_args()
print ("Options:", options)
explainer_w = 1-options.centrality_w
learner = 'grid-{}'.format(options.centrality_w)
# Load in the annotation file, the data seed, the edge weights by explainer, the edges in the communities.
DataNodeImp = pd.read_csv('../05GNNExplainer-eval-hitrate/input/annotation_publish.csv')
DataSeed = pd.read_csv('../05GNNExplainer-eval-hitrate/input/data-seed.txt')
DataEdgeWeight = pd.read_csv('../05GNNExplainer-eval-hitrate/input/data-edge-weight.txt')
df_e = pd.read_csv('../05GNNExplainer-eval-hitrate/input/masked_df_e.csv')
x_y_df = pd.read_csv('x_y_df_learn.csv')
del x_y_df['max_hitrate']
x_y_df['combined_weights'] = explainer_w * x_y_df['explainer'] + options.centrality_w * x_y_df['edge_btw']
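# For example (editor's note): running with --centrality-w 0.3 gives
# combined_weights = 0.7 * explainer + 0.3 * edge_btw for every edge.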
print('AUC score of the sample:', roc_auc_score(DataSeed.y, DataSeed['yhat']))
# Communities labeled 0 and 1.
comm0 = DataSeed[DataSeed.y==0].id.unique()
comm1 = DataSeed[DataSeed.y==1].id.unique()
df_node_weight = pd.read_csv('./results/df_node_weight_with_avgimp.csv')
# Preprocess explainer weights: compute an undirected edge weight by taking the max of the two directed edge weights.
# From node importance scores to edge importance score: "min"/"avg"/"sum".
df_edge_weight = df_e.copy()
df_edge_weight['importance'] = None
df_edge_weight['weight'] = None
df_edge_weight['weight_positive'] = None
df_edge_weight['weight_negative'] = None
for i, row in tqdm(df_edge_weight.iterrows(), total=len(df_edge_weight), ncols=80, mininterval=5):
src_node_id = row['source']
dst_node_id = row['target']
cc_id = row['community_id']
src_row = df_node_weight[(df_node_weight['node_id']==src_node_id) & (df_node_weight['community_id']==cc_id)].iloc[0]
dst_row = df_node_weight[(df_node_weight['node_id']==dst_node_id) & (df_node_weight['community_id']==cc_id)].iloc[0]
if options.edge_agg == 'min':
edge_imp_annotate = min(src_row['importance_avg'], dst_row['importance_avg'])
if options.edge_agg == 'avg':
edge_imp_annotate = np.mean([src_row['importance_avg'], dst_row['importance_avg']])
if options.edge_agg == 'sum':
edge_imp_annotate = src_row['importance_avg'] + dst_row['importance_avg']
edge_weights = DataEdgeWeight[DataEdgeWeight['src'].isin([src_node_id, dst_node_id]) &
DataEdgeWeight['dst'].isin([src_node_id, dst_node_id]) &
DataEdgeWeight['id'].isin([cc_id])]['edge_weight'].max()
df_edge_weight['importance'].iloc[i] = edge_imp_annotate
df_edge_weight['weight'].iloc[i] = edge_weights
df_edge_weight['weight_positive'].iloc[i] = DataEdgeWeight[DataEdgeWeight['src'].isin([src_node_id]) &
DataEdgeWeight['dst'].isin([dst_node_id]) &
DataEdgeWeight['id'].isin([cc_id])]['edge_weight'].iloc[0]
df_edge_weight['weight_negative'].iloc[i] = DataEdgeWeight[DataEdgeWeight['src'].isin([dst_node_id]) &
DataEdgeWeight['dst'].isin([src_node_id]) &
DataEdgeWeight['id'].isin([cc_id])]['edge_weight'].iloc[0]
df_edge_weight['combined_weights'] = x_y_df['combined_weights']
# Avg edge/community.
print('Average edges per community:', df_edge_weight.shape[0]/41)
df_edge_weight.rename(columns={'source':'src', 'target': 'dst', 'community_id': 'id',
'importance': 'edge_importance', 'combined_weights': 'edge_weight'}, inplace=True)
df_edge_weight.to_csv('./results/df_edge_weight_imp-{}-{}.csv'.format(options.edge_agg, learner))
df_edge_weight.rename(columns={'edge_importance':'importance'}, inplace=True)
df_edge_weight = df_edge_weight.reset_index()
# Topk hit rate
hitrate_df = pd.DataFrame(index=['all', 'comm0', 'comm1'] + list(range(0,41)))
for k in [i*5 for i in range(1,11)]:
hitrate_list_topk_comm = []
for cid in df_edge_weight.id.unique():
df_edge_weight_sub = df_edge_weight[df_edge_weight.id==cid]
imp_largest = sorted(dict(Counter(df_edge_weight_sub.importance)).items(), reverse=True)[0][0]
count_largest = sorted(dict(Counter(df_edge_weight_sub.importance)).items(), reverse=True)[0][1]
hitrate_list_topk = []
for r in tqdm(range(0,options.random_draw), total=options.random_draw, ncols=80, mininterval=5):
random.seed(r)
if count_largest <= k:
src_id_human_topk = df_edge_weight_sub[['src','dst']].values.tolist()
else:
all_human_top_edge_idx = df_edge_weight_sub[df_edge_weight_sub.importance == imp_largest].index
human_topk_edge_idx = random.sample(list(all_human_top_edge_idx), k)
src_id_human_topk = df_edge_weight.iloc[human_topk_edge_idx][['src','dst']].values.tolist()
explainer_topk_edge = df_edge_weight_sub.sort_values(by=['edge_weight'], ascending=False)[['edge_weight', 'src', 'dst']][:k]
src_id_explainer_topk = explainer_topk_edge[['src','dst']].values.tolist()
hitrate = len([p for p in src_id_explainer_topk if p in src_id_human_topk or (p[1], p[0]) in src_id_human_topk])/k
hitrate_list_topk.append(hitrate)
hitrate_list_topk_comm.append(np.mean(hitrate_list_topk))
all_hitrate = np.mean(hitrate_list_topk_comm)
comm0_hitrate = np.mean([h for (i,h) in enumerate(hitrate_list_topk_comm) if i in comm0])
comm1_hitrate = np.mean([h for (i,h) in enumerate(hitrate_list_topk_comm) if i in comm1])
hitrate_df['top{}'.format(k)] = [all_hitrate, comm0_hitrate, comm1_hitrate] + hitrate_list_topk_comm
hitrate_df.to_csv('./results/topk-{}-{}-{}.csv'.format(options.random_draw, options.edge_agg, learner), index=True)
ours = hitrate_df.loc[['all', 'comm0', 'comm1']]
print('Our topk hit rate:', ours)
print(ours)
train = hitrate_df.loc[range(0,21)]
test = hitrate_df.loc[range(21, 41)]
all_comms = hitrate_df.loc[range(0, 41)]  # avoid shadowing the builtin all()
np.mean(train).to_csv('./results/ours_{}_train.csv'.format(learner))
np.mean(test).to_csv('./results/ours_{}_test.csv'.format(learner))
np.mean(all_comms).to_csv('./results/ours_{}_all.csv'.format(learner))
| 44.597701
| 217
| 0.683634
|
63f17d48f8c4626dd76450f86c4282b3b5f5ce93
| 8,469
|
py
|
Python
|
pylatex/utils.py
|
yxqd/PyLaTeX
|
467823012185bcdb7a7a6f6f253c2835193ab5c8
|
[
"MIT"
] | null | null | null |
pylatex/utils.py
|
yxqd/PyLaTeX
|
467823012185bcdb7a7a6f6f253c2835193ab5c8
|
[
"MIT"
] | null | null | null |
pylatex/utils.py
|
yxqd/PyLaTeX
|
467823012185bcdb7a7a6f6f253c2835193ab5c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module implements some simple utility functions.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
import os.path
import getpass
import shutil
import tempfile
import pylatex.base_classes
_latex_special_chars = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\textasciitilde{}',
'^': r'\^{}',
'\\': r'\textbackslash{}',
'\n': '\\newline%\n',
'-': r'{-}',
'\xA0': '~', # Non-breaking space
'[': r'{[}',
']': r'{]}',
}
_tmp_path = os.path.abspath(
os.path.join(
tempfile.gettempdir(),
getpass.getuser(),
"pylatex"
)
)
def _is_iterable(element):
return hasattr(element, '__iter__') and not isinstance(element, str)
class NoEscape(str):
"""
A simple string class that is not escaped.
When a `.NoEscape` string is added to another `.NoEscape` string it will
produce a `.NoEscape` string. If it is added to normal string it will
produce a normal string.
Args
----
string: str
The content of the `NoEscape` string.
"""
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self)
def __add__(self, right):
s = super().__add__(right)
if isinstance(right, NoEscape):
return NoEscape(s)
return s
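# Editor's illustration: NoEscape('a') + NoEscape('b') yields a NoEscape string,
# while NoEscape('a') + 'b' falls back to a plain str.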
def escape_latex(s):
r"""Escape characters that are special in latex.
Args
----
s : `str`, `NoEscape` or anything that can be converted to string
The string to be escaped. If this is not a string, it will be converted
to a string using `str`. If it is a `NoEscape` string, it will pass
through unchanged.
Returns
-------
NoEscape
The string, with special characters in latex escaped.
Examples
--------
>>> escape_latex("Total cost: $30,000")
'Total cost: \$30,000'
>>> escape_latex("Issue #5 occurs in 30% of all cases")
'Issue \#5 occurs in 30\% of all cases'
    >>> print(escape_latex("Total cost: $30,000"))
    Total cost: \$30,000
References
----------
* http://tex.stackexchange.com/a/34586/43228
* http://stackoverflow.com/a/16264094/2570866
"""
if isinstance(s, NoEscape):
return s
return NoEscape(''.join(_latex_special_chars.get(c, c) for c in str(s)))
def fix_filename(path):
r"""Fix filenames for use in LaTeX.
    Latex has problems if there are one or more dots in the filename, thus
    'abc.def.jpg' will be changed to '{abc.def}.jpg'
    Args
    ----
    path : str
        The file name (path) to be changed.
Returns
-------
str
The new filename.
Examples
--------
>>> fix_filename("foo.bar.pdf")
'{foo.bar}.pdf'
>>> fix_filename("/etc/local/foo.bar.pdf")
'/etc/local/{foo.bar}.pdf'
>>> fix_filename("/etc/local/foo.bar.baz/document.pdf")
'/etc/local/foo.bar.baz/document.pdf'
>>> fix_filename("/etc/local/foo.bar.baz/foo~1/document.pdf")
'\detokenize{/etc/local/foo.bar.baz/foo~1/document.pdf}'
"""
path_parts = path.split('/' if os.name == 'posix' else '\\')
dir_parts = path_parts[:-1]
filename = path_parts[-1]
file_parts = filename.split('.')
if len(file_parts) > 2:
filename = '{' + '.'.join(file_parts[0:-1]) + '}.' + file_parts[-1]
dir_parts.append(filename)
fixed_path = '/'.join(dir_parts)
if '~' in fixed_path:
fixed_path = r'\detokenize{' + fixed_path + '}'
return fixed_path
def dumps_list(l, *, escape=True, token='%\n', mapper=None, as_content=True):
r"""Try to generate a LaTeX string of a list that can contain anything.
Args
----
l : list
A list of objects to be converted into a single string.
escape : bool
Whether to escape special LaTeX characters in converted text.
token : str
The token (default is a newline) to separate objects in the list.
mapper: callable or `list`
A function, class or a list of functions/classes that should be called
on all entries of the list after converting them to a string, for
instance `~.bold` or `~.MediumText`.
as_content: bool
Indicates whether the items in the list should be dumped using
`~.LatexObject.dumps_as_content`
Returns
-------
NoEscape
A single LaTeX string.
Examples
--------
>>> dumps_list([r"\textbf{Test}", r"\nth{4}"])
'\\textbf{Test}%\n\\nth{4}'
>>> print(dumps_list([r"\textbf{Test}", r"\nth{4}"]))
\textbf{Test}
\nth{4}
>>> print(pylatex.utils.dumps_list(["There are", 4, "lights!"]))
There are
4
lights!
>>> print(dumps_list(["$100%", "True"], escape=True))
\$100\%
True
"""
strings = (_latex_item_to_string(i, escape=escape, as_content=as_content)
for i in l)
if mapper is not None:
if not isinstance(mapper, list):
mapper = [mapper]
for m in mapper:
strings = [m(s) for s in strings]
strings = [_latex_item_to_string(s) for s in strings]
return NoEscape(token.join(strings))
def _latex_item_to_string(item, *, escape=False, as_content=False):
"""Use the render method when possible, otherwise uses str.
Args
----
item: object
An object that needs to be converted to a string
escape: bool
Flag that indicates if escaping is needed
as_content: bool
Indicates whether the item should be dumped using
`~.LatexObject.dumps_as_content`
Returns
-------
NoEscape
Latex
"""
if isinstance(item, pylatex.base_classes.LatexObject):
if as_content:
return item.dumps_as_content()
else:
return item.dumps()
elif not isinstance(item, str):
item = str(item)
if escape:
item = escape_latex(item)
return item
def bold(s, *, escape=True):
r"""Make a string appear bold in LaTeX formatting.
bold() wraps a given string in the LaTeX command \textbf{}.
Args
----
s : str
The string to be formatted.
escape: bool
If true the bold text will be escaped
Returns
-------
NoEscape
The formatted string.
Examples
--------
>>> bold("hello")
'\\textbf{hello}'
>>> print(bold("hello"))
\textbf{hello}
"""
if escape:
s = escape_latex(s)
return NoEscape(r'\textbf{' + s + '}')
def italic(s, *, escape=True):
r"""Make a string appear italicized in LaTeX formatting.
italic() wraps a given string in the LaTeX command \textit{}.
Args
----
s : str
The string to be formatted.
escape: bool
If true the italic text will be escaped
Returns
-------
NoEscape
The formatted string.
Examples
--------
>>> italic("hello")
'\\textit{hello}'
>>> print(italic("hello"))
\textit{hello}
"""
if escape:
s = escape_latex(s)
return NoEscape(r'\textit{' + s + '}')
def verbatim(s, *, delimiter='|'):
r"""Make the string verbatim.
Wraps the given string in a \verb LaTeX command.
Args
----
s : str
The string to be formatted.
delimiter : str
How to designate the verbatim text (default is a pipe | )
Returns
-------
NoEscape
The formatted string.
Examples
--------
>>> verbatim(r"\renewcommand{}")
'\\verb|\\renewcommand{}|'
>>> print(verbatim(r"\renewcommand{}"))
\verb|\renewcommand{}|
>>> print(verbatim('pi|pe', '!'))
\verb!pi|pe!
"""
return NoEscape(r'\verb' + delimiter + s + delimiter)
def make_temp_dir():
"""Create a temporary directory if it doesn't exist.
    Directories created by this function follow the format specified
by ``_tmp_path`` and are a pylatex subdirectory within
a standard ``tempfile`` tempdir.
Returns
-------
str
The absolute filepath to the created temporary directory.
Examples
--------
>>> make_temp_dir()
'/var/folders/g9/ct5f3_r52c37rbls5_9nc_qc0000gn/T/pylatex'
"""
if not os.path.exists(_tmp_path):
os.makedirs(_tmp_path)
return _tmp_path
def rm_temp_dir():
"""Remove the temporary directory specified in ``_tmp_path``."""
if os.path.exists(_tmp_path):
shutil.rmtree(_tmp_path)
| 23.789326
| 79
| 0.585902
|
247b4470485d9e341902906feeb941e49ad98be2
| 2,539
|
py
|
Python
|
mysite/account/models.py
|
wuhaoqiu/engr597-stable
|
284ab9efae8361c139d330313abb831bfea9e5b9
|
[
"MIT"
] | null | null | null |
mysite/account/models.py
|
wuhaoqiu/engr597-stable
|
284ab9efae8361c139d330313abb831bfea9e5b9
|
[
"MIT"
] | 6
|
2020-06-05T20:18:51.000Z
|
2022-03-11T23:42:53.000Z
|
mysite/account/models.py
|
wuhaoqiu/engr597-stable
|
284ab9efae8361c139d330313abb831bfea9e5b9
|
[
"MIT"
] | 1
|
2020-06-24T01:39:15.000Z
|
2020-06-24T01:39:15.000Z
|
# User,Group,Permission
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
# Create your models here.
# this will create one more table called Profile, rather than adding fields directly to the original User table
class Profile(models.Model):
# instead of inheritance, we use OneToOneField to extend models, so this is usually used in a child model
# Using settings.AUTH_USER_MODEL to relate profile with auth.user model
# get_user_model will attempt to retrieve the model class at the moment your app is imported the first time.
user=models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
date_of_birth=models.DateField(blank=True, null=True)
# in order to upload images, we need to add some lines in settings.py
photo=models.ImageField(upload_to='users/%Y/%m/%d/', blank=True,default='default/default_profile.png')
def __str__(self):
return 'Profile for user {}'.format(self.user.username)
# # this is an intermediary model for the many-to-many relationship; use it when you want to add extra fields/features to the relationship
# class Contact(models.Model):
# # follower
# user_from = models.ForeignKey('auth.User',related_name='rel_from_set',on_delete=models.CASCADE)
# # being followed
# user_to = models.ForeignKey('auth.User',related_name='rel_to_set',on_delete=models.CASCADE)
# created = models.DateTimeField(auto_now_add=True,db_index=True)
#
# class Meta:
# ordering = ('-created',)
#
# def __str__(self):
# return '{} follows {}'.format(self.user_from,self.user_to)
# now that we have built the intermediary, it's time to tell the two related models to use this intermediary to construct the relationship
# here, because <User> is built by the system, we cannot modify it directly; instead, we dynamically add this ManyToManyField (which goes through <Contact>) to the class
# also, tell Django this relationship is not symmetrical, which means that if a follows b, b does not need to follow a
# also, when using an intermediary, to delete or remove a relationship we remove the corresponding <Contact> instance instead of directly changing the <User> instance
# Finally, a warning: this way of adding the field is not recommended; the better ways are
# 1. adding this to <Profile> model
# 2. using django customized model https://docs.djangoproject.com/en/2.0/topics/auth/customizing/#specifying-a-custom-user-model
# User.add_to_class('following', models.ManyToManyField('self',through=Contact, related_name='followers',symmetrical=False))
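# Editor's sketch of recommended option 1 above (hypothetical field names, not part
# of the original project): declare the relationship on the Profile model itself, e.g.
#
#     class Profile(models.Model):
#         ...
#         following = models.ManyToManyField(
#             'self', symmetrical=False, related_name='followers')
#
# (If the <Contact> intermediary is kept, its ForeignKeys would then have to point
# at Profile instead of auth.User.)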
| 59.046512
| 178
| 0.760142
|
919dd5cc6e139ab4a3fbf1e71dd8607700e407f7
| 7,137
|
py
|
Python
|
manim/animation/animation.py
|
yy/manim
|
2ed00b6b26499afa23ba419373e6899448e29f15
|
[
"MIT"
] | 1
|
2019-01-13T19:21:00.000Z
|
2019-01-13T19:21:00.000Z
|
manim/animation/animation.py
|
yy/manim
|
2ed00b6b26499afa23ba419373e6899448e29f15
|
[
"MIT"
] | null | null | null |
manim/animation/animation.py
|
yy/manim
|
2ed00b6b26499afa23ba419373e6899448e29f15
|
[
"MIT"
] | null | null | null |
"""Animate mobjects."""
__all__ = ["Animation", "Wait"]
import typing
from copy import deepcopy
import numpy as np
if typing.TYPE_CHECKING:
from manim.scene.scene import Scene
from .. import logger
from ..mobject.mobject import Mobject
from ..utils.rate_functions import smooth
DEFAULT_ANIMATION_RUN_TIME: float = 1.0
DEFAULT_ANIMATION_LAG_RATIO: float = 0.0
class Animation:
def __init__(
self,
mobject: Mobject,
# If lag_ratio is 0, the animation is applied to all submobjects
# at the same time
# If 1, it is applied to each successively.
        # If 0 < lag_ratio < 1, it's applied to each
# with lagged start times
lag_ratio: float = DEFAULT_ANIMATION_LAG_RATIO,
        run_time: float = DEFAULT_ANIMATION_RUN_TIME,
rate_func: typing.Callable[[float, float], np.ndarray] = smooth,
name: str = None,
remover: bool = False, # remove a mobject from the screen?
suspend_mobject_updating: bool = True,
**kwargs
) -> None:
self._typecheck_input(mobject)
self.run_time = run_time
self.rate_func = rate_func
self.name = name
self.remover = remover
self.suspend_mobject_updating = suspend_mobject_updating
self.lag_ratio = lag_ratio
self.starting_mobject = None
self.mobject = mobject
if kwargs:
logger.debug("Animation received extra kwargs: %s", kwargs)
if hasattr(self, "CONFIG"):
logger.error(
(
"CONFIG has been removed from ManimCommunity.",
"Please use keyword arguments instead.",
)
)
def _typecheck_input(self, mobject: Mobject) -> None:
if mobject is None:
logger.warning("creating dummy animation")
elif not isinstance(mobject, Mobject):
raise TypeError("Animation only works on Mobjects")
def __str__(self) -> str:
if self.name:
return self.name
return self.__class__.__name__ + str(self.mobject)
def begin(self) -> None:
# This is called right as an animation is being
# played. As much initialization as possible,
# especially any mobject copying, should live in
# this method
self.starting_mobject = self.create_starting_mobject()
if self.suspend_mobject_updating:
# All calls to self.mobject's internal updaters
# during the animation, either from this Animation
# or from the surrounding scene, should do nothing.
# It is, however, okay and desirable to call
# the internal updaters of self.starting_mobject,
# or any others among self.get_all_mobjects()
self.mobject.suspend_updating()
self.interpolate(0)
def finish(self) -> None:
self.interpolate(1)
if self.suspend_mobject_updating:
self.mobject.resume_updating()
def clean_up_from_scene(self, scene: "Scene") -> None:
if self.is_remover():
scene.remove(self.mobject)
def create_starting_mobject(self) -> Mobject:
# Keep track of where the mobject starts
return self.mobject.copy()
def get_all_mobjects(self) -> typing.Tuple[Mobject, typing.Union[Mobject, None]]:
"""
Ordering must match the ordering of arguments to interpolate_submobject
"""
return self.mobject, self.starting_mobject
def get_all_families_zipped(self) -> typing.Iterator[typing.Tuple]:
return zip(
*[mob.family_members_with_points() for mob in self.get_all_mobjects()]
)
def update_mobjects(self, dt: int) -> None:
"""
Updates things like starting_mobject, and (for
Transforms) target_mobject. Note, since typically
(always?) self.mobject will have its updating
suspended during the animation, this will do
nothing to self.mobject.
"""
for mob in self.get_all_mobjects_to_update():
mob.update(dt)
def get_all_mobjects_to_update(self) -> list:
# The surrounding scene typically handles
# updating of self.mobject. Besides, in
# most cases its updating is suspended anyway
return list(filter(lambda m: m is not self.mobject, self.get_all_mobjects()))
def copy(self) -> "Animation":
return deepcopy(self)
    # Methods for interpolation, the core of an Animation
def interpolate(self, alpha: float) -> None:
alpha = np.clip(alpha, 0, 1)
self.interpolate_mobject(self.rate_func(alpha))
def update(self, alpha: float) -> None:
"""
This method shouldn't exist, but it's here to
keep many old scenes from breaking
"""
logger.warning(
"animation.update() has been deprecated. "
"Please use animation.interpolate() instead."
)
self.interpolate(alpha)
def interpolate_mobject(self, alpha: float) -> None:
families = list(self.get_all_families_zipped())
for i, mobs in enumerate(families):
sub_alpha = self.get_sub_alpha(alpha, i, len(families))
self.interpolate_submobject(*mobs, sub_alpha)
def interpolate_submobject(
self, submobject: Mobject, starting_submobject: Mobject, alpha: float
) -> None:
# Typically implemented by subclass
pass
def get_sub_alpha(self, alpha: float, index: int, num_submobjects: int):
# TODO, make this more understandable, and/or combine
# its functionality with AnimationGroup's method
# build_animations_with_timings
lag_ratio = self.lag_ratio
full_length = (num_submobjects - 1) * lag_ratio + 1
value = alpha * full_length
lower = index * lag_ratio
return np.clip((value - lower), 0, 1)
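        # Worked example (editor's addition): with lag_ratio=0.5 and 3 submobjects,
        # full_length = 2, so at alpha=0.5 the sub_alphas are 1.0, 0.5 and 0.0
        # for indices 0, 1 and 2 respectively.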
# Getters and setters
def set_run_time(self, run_time: float) -> "Animation":
self.run_time = run_time
return self
def get_run_time(self) -> float:
return self.run_time
def set_rate_func(
self, rate_func: typing.Callable[[float, float], np.ndarray]
) -> "Animation":
self.rate_func = rate_func
return self
def get_rate_func(self) -> typing.Callable[[float, float], np.ndarray]:
return self.rate_func
def set_name(self, name: str) -> "Animation":
self.name = name
return self
def is_remover(self) -> bool:
return self.remover
class Wait(Animation):
def __init__(
self, duration: float = 1, stop_condition=None, **kwargs
    ):  # stop_condition: optional zero-argument callable; the wait can end early once it returns True
self.duration = duration
self.mobject = None
self.stop_condition = stop_condition
super().__init__(None, **kwargs)
def begin(self) -> None:
pass
def finish(self) -> None:
pass
def clean_up_from_scene(self, scene: "Scene") -> None:
pass
def update_mobjects(self, dt: int) -> None:
pass
def interpolate(self, alpha: float) -> None:
pass
| 32.889401
| 85
| 0.628555
|
71a43f80646b0b3a01d151b1838849b9ac192a0b
| 327
|
py
|
Python
|
test/integration/test_config_schema.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
test/integration/test_config_schema.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | 6
|
2021-11-11T20:57:49.000Z
|
2021-12-10T15:30:33.000Z
|
test/integration/test_config_schema.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
from galaxy_test.driver import integration_util
class ConfigSchemaTestCase(integration_util.IntegrationTestCase):
def test_schema_path_resolution_graph(self):
# Run schema's validation method; throws error if schema invalid
schema = self._app.config.schema
schema.validate_path_resolution_graph()
| 36.333333
| 72
| 0.785933
|
513069c763cf41e5dcc341806a5302ae1a490ae5
| 4,600
|
py
|
Python
|
horovod/spark/util/safe_shell_exec.py
|
rahul003/horovod
|
6a933cd4c60f7eae5816aa380adefda5b70ef3cf
|
[
"Apache-2.0"
] | 4
|
2018-10-10T18:17:45.000Z
|
2019-03-15T05:29:20.000Z
|
horovod/spark/util/safe_shell_exec.py
|
rahul003/horovod
|
6a933cd4c60f7eae5816aa380adefda5b70ef3cf
|
[
"Apache-2.0"
] | 7
|
2018-10-15T23:04:29.000Z
|
2019-01-17T23:15:03.000Z
|
horovod/spark/util/safe_shell_exec.py
|
rahul003/horovod
|
6a933cd4c60f7eae5816aa380adefda5b70ef3cf
|
[
"Apache-2.0"
] | 2
|
2018-11-01T18:30:11.000Z
|
2018-11-08T05:41:30.000Z
|
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import psutil
import signal
import subprocess
import sys
import threading
import time
GRACEFUL_TERMINATION_TIME_S = 5
def terminate_executor_shell_and_children(pid):
p = psutil.Process(pid)
# Terminate children gracefully.
for child in p.children():
try:
child.terminate()
except psutil.NoSuchProcess:
pass
# Wait for graceful termination.
time.sleep(GRACEFUL_TERMINATION_TIME_S)
# Send STOP to executor shell to stop progress.
p.send_signal(signal.SIGSTOP)
# Kill children recursively.
for child in p.children(recursive=True):
try:
child.kill()
except psutil.NoSuchProcess:
pass
# Kill shell itself.
p.kill()
def forward_stream(src_fd, dst_stream):
if hasattr(dst_stream, 'buffer'):
# If dst stream is a text buffer, we need to get its binary buffer.
dst_stream = dst_stream.buffer
with os.fdopen(src_fd, 'rb') as src:
while True:
line = src.readline()
if not line:
break
dst_stream.write(line)
dst_stream.flush()
def execute(command, env=None, stdout=None, stderr=None):
# Make a pipe for the subprocess stdout/stderr.
(stdout_r, stdout_w) = os.pipe()
(stderr_r, stderr_w) = os.pipe()
# Make a pipe for notifying the child that parent has died.
(r, w) = os.pipe()
middleman_pid = os.fork()
if middleman_pid == 0:
# Close unused file descriptors to enforce PIPE behavior.
os.close(w)
os.setpgid(0, 0)
executor_shell = subprocess.Popen(command, shell=True, env=env,
stdout=stdout_w, stderr=stderr_w)
sigterm_received = threading.Event()
def set_sigterm_received(signum, frame):
sigterm_received.set()
signal.signal(signal.SIGINT, set_sigterm_received)
signal.signal(signal.SIGTERM, set_sigterm_received)
def kill_executor_children_if_parent_dies():
# This read blocks until the pipe is closed on the other side
# due to the process termination.
os.read(r, 1)
terminate_executor_shell_and_children(executor_shell.pid)
bg = threading.Thread(target=kill_executor_children_if_parent_dies)
bg.daemon = True
bg.start()
def kill_executor_children_if_sigterm_received():
sigterm_received.wait()
terminate_executor_shell_and_children(executor_shell.pid)
bg = threading.Thread(target=kill_executor_children_if_sigterm_received)
bg.daemon = True
bg.start()
exit_code = executor_shell.wait()
os._exit(exit_code)
# Close unused file descriptors to enforce PIPE behavior.
os.close(r)
os.close(stdout_w)
os.close(stderr_w)
# Redirect command stdout & stderr to provided streams or sys.stdout/sys.stderr.
# This is useful for Jupyter Notebook that uses custom sys.stdout/sys.stderr or
# for redirecting to a file on disk.
if stdout is None:
stdout = sys.stdout
if stderr is None:
stderr = sys.stderr
stdout_fwd = threading.Thread(target=forward_stream, args=(stdout_r, stdout))
stderr_fwd = threading.Thread(target=forward_stream, args=(stderr_r, stderr))
stdout_fwd.start()
stderr_fwd.start()
try:
_, status = os.waitpid(middleman_pid, 0)
except:
# interrupted, send middleman TERM signal which will terminate children
os.kill(middleman_pid, signal.SIGTERM)
while True:
try:
_, status = os.waitpid(middleman_pid, 0)
break
except:
# interrupted, wait for middleman to finish
pass
stdout_fwd.join()
stderr_fwd.join()
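    # waitpid() returns a 16-bit status word; for a normally terminated child the
    # high byte holds its exit code, hence the shift below.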
exit_code = status >> 8
return exit_code
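# A minimal usage sketch (editor's addition, not part of the original module).
# Like the module itself it is POSIX-only, since execute() relies on os.fork().
if __name__ == '__main__':
    print('exit code:', execute('echo safe_shell_exec smoke test'))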
| 30.872483
| 84
| 0.644783
|
a3a4873b216defa4635804e665fcffac42cf1a97
| 1,163
|
py
|
Python
|
readthedocs/settings/proxito/base.py
|
yohalchan/readthedocs.org
|
77df2790b985eb17f345a36656e15778443931b6
|
[
"MIT"
] | 1
|
2020-09-24T22:01:38.000Z
|
2020-09-24T22:01:38.000Z
|
readthedocs/settings/proxito/base.py
|
https-microsoft-com-powershell/readthedocs.org
|
a1d2de8907ecb2da84402bbb6cc5c271be20b5c0
|
[
"MIT"
] | null | null | null |
readthedocs/settings/proxito/base.py
|
https-microsoft-com-powershell/readthedocs.org
|
a1d2de8907ecb2da84402bbb6cc5c271be20b5c0
|
[
"MIT"
] | null | null | null |
"""
Base settings for Proxito
Some of these settings will eventually be backported into the main settings file,
but currently we have them to be able to run the site with the old middleware for
a staged rollout of the proxito code.
"""
class CommunityProxitoSettingsMixin:
ROOT_URLCONF = 'readthedocs.proxito.urls'
USE_SUBDOMAIN = True
SECURE_REFERRER_POLICY = "no-referrer-when-downgrade"
@property
def DATABASES(self):
# This keeps connections to the DB alive,
# which reduces latency with connecting to postgres
dbs = getattr(super(), 'DATABASES', {})
for db in dbs.keys():
dbs[db]['CONN_MAX_AGE'] = 86400
return dbs
@property
def MIDDLEWARE(self): # noqa
# Use our new middleware instead of the old one
classes = super().MIDDLEWARE
classes = list(classes)
classes.append('readthedocs.proxito.middleware.ProxitoMiddleware')
middleware_to_remove = (
'csp.middleware.CSPMiddleware',
)
for mw in middleware_to_remove:
if mw in classes:
classes.remove(mw)
return classes
| 29.075
| 81
| 0.654342
|
a485961ac05eb1e7dfb7ec6821462004c4a201ce
| 1,623
|
py
|
Python
|
pyfov.py
|
flags/Reactor-3
|
b41a2904c9ec8cc14bcee03611602d0e568acf12
|
[
"MIT"
] | 56
|
2015-04-20T08:31:29.000Z
|
2021-12-19T14:05:18.000Z
|
pyfov.py
|
HexDecimal/Reactor-3
|
b41a2904c9ec8cc14bcee03611602d0e568acf12
|
[
"MIT"
] | 2
|
2018-07-24T11:24:41.000Z
|
2021-05-16T03:04:53.000Z
|
pyfov.py
|
HexDecimal/Reactor-3
|
b41a2904c9ec8cc14bcee03611602d0e568acf12
|
[
"MIT"
] | 9
|
2015-11-03T02:56:20.000Z
|
2021-04-28T08:19:57.000Z
|
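# Editor's note: this is a recursive shadowcasting field-of-view routine. The
# xx/xy/yx/yy multipliers select one of the eight octants; the slope window
# [end_slope, start_slope] narrows as blocking cells are found, and visible cells
# within the radius are marked in los_map.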
def old_light(los_map, world_pos, size, row, start_slope, end_slope, xx, xy, yx, yy, collision_map, map_size):
_return_chunks = set()
if start_slope < end_slope:
return los_map, _return_chunks
x, y, z = world_pos
_next_start_slope = start_slope
for i in range(row, size):
_blocked = False
_d_x = -i
_d_y = -i
while _d_x <= 0:
_l_slope = (_d_x - 0.5) / (_d_y + 0.5)
_r_slope = (_d_x + 0.5) / (_d_y - 0.5)
if start_slope < _r_slope:
_d_x += 1
continue
elif end_slope>_l_slope:
break
_sax = _d_x * xx + _d_y * xy
_say = _d_x * yx + _d_y * yy
if (_sax<0 and abs(_sax)>x) or (_say<0 and abs(_say)>y):
_d_x += 1
continue
_a_x = x + _sax
_a_y = y + _say
if _a_x >= map_size[0] or _a_y >= map_size[1]:
_d_x += 1
continue
_rad2 = size*size
_solid = collision_map[_sax+size, _say+size]
if (_d_x * _d_x + _d_y * _d_y) < _rad2:
los_map[_sax+size, _say+size] = 1
if not _solid:
_chunk_key = '%s,%s' % ((_a_x/5)*5, (_a_y/5)*5)
if not _chunk_key in _return_chunks:
_return_chunks.add(_chunk_key)
if _blocked:
if _solid:
_next_start_slope = _r_slope
_d_x += 1
continue
else:
_blocked = False
start_slope = _next_start_slope
elif _solid:
_blocked = True
_next_start_slope = _r_slope
_map, _chunk_keys = old_light(los_map, world_pos, size, i+1, start_slope, _l_slope, xx, xy, yx, yy, collision_map, map_size)
los_map += _map
_return_chunks.update(_chunk_keys)
_d_x += 1
if _blocked:
break
return los_map, _return_chunks
| 22.232877
| 128
| 0.617375
|
dff37a9bfafeafe6be435860ab916e5ecf533b32
| 3,142
|
py
|
Python
|
buildout_env/lib/python2.7/site-packages/scipy/interpolate/__init__.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 11
|
2017-12-25T23:22:13.000Z
|
2021-09-28T00:23:37.000Z
|
buildout_env/lib/python2.7/site-packages/scipy/interpolate/__init__.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 1
|
2017-01-12T09:51:20.000Z
|
2017-03-09T23:19:57.000Z
|
buildout_env/lib/python2.7/site-packages/scipy/interpolate/__init__.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 4
|
2017-01-11T15:40:52.000Z
|
2018-07-25T16:03:52.000Z
|
"""========================================
Interpolation (:mod:`scipy.interpolate`)
========================================
.. currentmodule:: scipy.interpolate
Sub-package for objects used in interpolation.
As listed below, this sub-package contains spline functions and classes,
one-dimensional and multi-dimensional (univariate and multivariate)
interpolation classes, Lagrange and Taylor polynomial interpolators, and
wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
and DFITPACK functions.
Univariate interpolation
========================
.. autosummary::
:toctree: generated/
interp1d
BarycentricInterpolator
KroghInterpolator
PiecewisePolynomial
PchipInterpolator
barycentric_interpolate
krogh_interpolate
piecewise_polynomial_interpolate
pchip_interpolate
Akima1DInterpolator
PPoly
BPoly
Multivariate interpolation
==========================
Unstructured data:
.. autosummary::
:toctree: generated/
griddata
LinearNDInterpolator
NearestNDInterpolator
CloughTocher2DInterpolator
Rbf
interp2d
For data on a grid:
.. autosummary::
:toctree: generated/
interpn
RegularGridInterpolator
RectBivariateSpline
.. seealso:: `scipy.ndimage.interpolation.map_coordinates`
1-D Splines
===========
.. autosummary::
:toctree: generated/
UnivariateSpline
InterpolatedUnivariateSpline
LSQUnivariateSpline
Functional interface to FITPACK functions:
.. autosummary::
:toctree: generated/
splrep
splprep
splev
splint
sproot
spalde
splder
splantider
insert
2-D Splines
===========
For data on a grid:
.. autosummary::
:toctree: generated/
RectBivariateSpline
RectSphereBivariateSpline
For unstructured data:
.. autosummary::
:toctree: generated/
BivariateSpline
SmoothBivariateSpline
SmoothSphereBivariateSpline
LSQBivariateSpline
LSQSphereBivariateSpline
Low-level interface to FITPACK functions:
.. autosummary::
:toctree: generated/
bisplrep
bisplev
Additional tools
================
.. autosummary::
:toctree: generated/
lagrange
approximate_taylor_polynomial
.. seealso::
`scipy.ndimage.interpolation.map_coordinates`,
`scipy.ndimage.interpolation.spline_filter`,
`scipy.signal.resample`,
`scipy.signal.bspline`,
`scipy.signal.gauss_spline`,
`scipy.signal.qspline1d`,
`scipy.signal.cspline1d`,
`scipy.signal.qspline1d_eval`,
`scipy.signal.cspline1d_eval`,
`scipy.signal.qspline2d`,
`scipy.signal.cspline2d`.
Functions existing for backward compatibility (should not be used in
new code):
.. autosummary::
:toctree: generated/
ppform
spleval
spline
splmake
spltopp
pchip
"""
from __future__ import division, print_function, absolute_import
from .interpolate import *
from .fitpack import *
# New interface to fitpack library:
from .fitpack2 import *
from .rbf import Rbf
from .polyint import *
from ._monotone import *
from .ndgriddata import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
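# --- Added illustrative usage (hedged sketch; not part of the scipy source) ---
# The module docstring above lists the univariate interpolation API. A minimal
# example of the two most common entry points, interp1d and the splrep/splev
# pair, kept in comments so it does not run at import time:
#
#     import numpy as np
#     from scipy.interpolate import interp1d, splrep, splev
#
#     x = np.linspace(0, 10, 11)
#     y = np.sin(x)
#     f = interp1d(x, y, kind='cubic')              # callable interpolant
#     y_dense = f(np.linspace(0, 10, 101))
#
#     tck = splrep(x, y)                            # B-spline representation
#     y_spline = splev(np.linspace(0, 10, 101), tck)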
| 17.852273
| 72
| 0.706238
|
a0a5bc9014071cbd73a5ac2cb48f3db38cea9565
| 2,023
|
py
|
Python
|
court_scraper/platforms/oscn/pages/daily_filings.py
|
mscarey/court-scraper
|
0e13976d901352a09cfd7e48450bbe427494f48e
|
[
"0BSD"
] | 30
|
2020-09-08T23:59:34.000Z
|
2022-03-24T03:02:47.000Z
|
court_scraper/platforms/oscn/pages/daily_filings.py
|
palewire/court-scraper
|
da4b614fb16806d8b5117373d273f802ca93a8cb
|
[
"0BSD"
] | 111
|
2020-09-16T23:42:40.000Z
|
2022-02-19T01:25:55.000Z
|
court_scraper/platforms/oscn/pages/daily_filings.py
|
palewire/court-scraper
|
da4b614fb16806d8b5117373d273f802ca93a8cb
|
[
"0BSD"
] | 9
|
2020-10-05T13:19:03.000Z
|
2021-12-11T12:12:13.000Z
|
import requests
from court_scraper.utils import dates_for_range
from .base_search import BaseSearch
from .daily_filings_results import DailyFilingsResultsPage
class DailyFilings(BaseSearch):
"""Search daily filings by county for limited number of larger counties.
Supports searches by date only.
Args:
- place_id (str): Standard place id (e.g. ok_tulsa or ok_roger_mills)
"""
def __init__(self, place_id):
self.url = 'https://www.oscn.net/applications/oscn/report.asp'
self.place_id = place_id
def search(self, start_date, end_date, case_details=False):
date_format = "%m-%d-%y"
dates = dates_for_range(start_date, end_date, output_format=date_format)
search_results = []
for date_str in dates:
# Convert date_str to standard YYYY-MM-DD for upstream usage
date_key = self._standardize_date(date_str, date_format, "%Y-%m-%d")
basic_case_data = self._run_search_for_day(date_str)
# Skip if there were no results for date
if not basic_case_data:
continue
if case_details:
results = self._scrape_case_details(date_key, basic_case_data)
search_results.extend(results)
else:
# Add the filing date to CaseInfo instances if it's only a metadata search
# since it's not listed on results page
for case in basic_case_data:
case.update({'filing_date': date_key})
search_results.append(case)
return search_results
def _run_search_for_day(self, day):
payload = {
'report': 'DailyFilings',
'errorcheck': 'true',
'database': '',
'db': self._place,
'StartDate': day
}
response = requests.get(self.url, params=payload)
html = response.text
page = DailyFilingsResultsPage(self.place_id, html)
return page.results
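# --- Added illustrative usage (hedged sketch; not part of the original module) ---
# The class docstring above documents a date-only search keyed by a place id.
# The place id follows the documented 'ok_tulsa' style; the date-string format
# accepted by dates_for_range() is an assumption here:
#
#     search = DailyFilings('ok_tulsa')
#     cases = search.search('2021-01-04', '2021-01-08')
#     detailed = search.search('2021-01-04', '2021-01-08', case_details=True)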
| 36.781818
| 90
| 0.622837
|
615cdacf1566ffb3afa10469197aad0ddba40f10
| 4,463
|
py
|
Python
|
code/sf_t3d/settings.py
|
superfluidity/RDCL3D
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
[
"Apache-2.0"
] | 8
|
2017-03-13T16:34:28.000Z
|
2021-11-16T11:35:56.000Z
|
code/sf_t3d/settings.py
|
superfluidity/RDCL3D
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
[
"Apache-2.0"
] | null | null | null |
code/sf_t3d/settings.py
|
superfluidity/RDCL3D
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
[
"Apache-2.0"
] | 3
|
2017-03-28T09:26:40.000Z
|
2020-12-08T14:16:12.000Z
|
"""
Django settings for sf_t3d project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o5+o2jv(3-dqr(&ia#-@79cgr%xi*s+6xjws^8cxp211ge#buf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = "sf_user.CustomUser"
SITE_NAME = "RDCL 3D"
SHORT_SITE_NAME = "RDCL"
LOGIN_URL = '/auth/'
LOGOUT_URL = '/auth/'
VERSION = "0.0.1"
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'sf_user',
'projecthandler',
'deploymenthandler',
'webhookhandler'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
SESSION_ENGINE='sf_user.sessions'
SESSION_COOKIE_AGE = 3500  # just under 1 hour (3500 seconds)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SAVE_EVERY_REQUEST = True
ROOT_URLCONF = 'sf_t3d.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'template'),
os.path.join(BASE_DIR, 'projecthandler', 'template'),
os.path.join(BASE_DIR, 'projecthandler', 'template', 'download'),
os.path.join(BASE_DIR, 'projecthandler', 'template', 'project'),
os.path.join(BASE_DIR, 'deploymenthandler', 'template'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'sf_t3d.context_processor.conf_constants',
],
'libraries':{
'get': 'sf_t3d.templatetags.get',
}
},
},
]
WSGI_APPLICATION = 'sf_t3d.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, "static"),
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
| 26.885542
| 91
| 0.686758
|
a1d26b4b1ab781aeb7a08930d510bb6093b06a37
| 1,186
|
py
|
Python
|
v2.5.7/toontown/hood/SBHoodDataAI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-01T15:46:43.000Z
|
2021-07-23T16:26:48.000Z
|
v2.5.7/toontown/hood/SBHoodDataAI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 1
|
2019-06-29T03:40:05.000Z
|
2021-06-13T01:15:16.000Z
|
v2.5.7/toontown/hood/SBHoodDataAI.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-28T21:18:46.000Z
|
2021-02-25T06:37:25.000Z
|
from direct.directnotify import DirectNotifyGlobal
import HoodDataAI
from toontown.toonbase import ToontownGlobals
from toontown.safezone import ButterflyGlobals
from toontown.episodes.DistributedPrologueEventAI import DistributedPrologueEventAI
class SBHoodDataAI(HoodDataAI.HoodDataAI):
notify = DirectNotifyGlobal.directNotify.newCategory('HoodAI')
def __init__(self, air, zoneId=None):
hoodId = ToontownGlobals.ScroogeBank
if zoneId == None:
zoneId = hoodId
HoodDataAI.HoodDataAI.__init__(self, air, zoneId, hoodId)
return
def startup(self):
self.notify.info('Creating prologue...')
HoodDataAI.HoodDataAI.startup(self)
self.butterflies = []
self.proEv = None
self.createButterflies(ButterflyGlobals.DG)
if self.air.wantPrologue:
self.createPrologueEvent()
return
def createPrologueEvent(self):
self.proEv = self.air.doFind('PrologueEvent')
if self.proEv is None:
self.proEv = DistributedPrologueEventAI(self.air)
self.proEv.generateWithRequired(self.zoneId)
self.proEv.b_setState('Idle')
return
| 35.939394
| 83
| 0.699831
|
e35971a3344bda446a680bfee708ae69607e10c8
| 736
|
py
|
Python
|
examples/LandmarkDetection/DQN/utils/change_paths.py
|
Jdorri/rl-medical
|
750b1f10413daa9cd8e346332bd844212e76ddd8
|
[
"Apache-2.0"
] | 6
|
2020-01-10T15:32:00.000Z
|
2021-06-14T17:28:03.000Z
|
examples/LandmarkDetection/DQN/utils/change_paths.py
|
Jdorri/rl-medical
|
750b1f10413daa9cd8e346332bd844212e76ddd8
|
[
"Apache-2.0"
] | 78
|
2020-01-19T10:47:31.000Z
|
2020-05-13T11:13:05.000Z
|
examples/LandmarkDetection/DQN/utils/change_paths.py
|
Jdorri/rl-medical
|
750b1f10413daa9cd8e346332bd844212e76ddd8
|
[
"Apache-2.0"
] | 1
|
2020-10-04T12:04:26.000Z
|
2020-10-04T12:04:26.000Z
|
import os
new_base_path = "/vol/biomedic/users/aa16914/shared/data/RL_data/cardiac_mri_adult"
file_path = "/vol/biomedic/users/aa16914/shared/data/RL_data/"
data_files = ["cardiac_test_files.txt", "cardiac_train_files.txt"]
landmark_files = ["cardiac_test_landmarks.txt", "cardiac_train_landmarks.txt"]
file_full_path = os.path.join(file_path, landmark_files[0])
print(file_path)
with open(file_full_path) as fp:
for line in fp.readlines():
new_path = '/'.join(new_base_path.split('/') + line.split('/')[7:])
# print(new_path)
write_path = os.path.join(file_path, "cardiac_test_landmarks_new_paths.txt")
f_new=open(write_path, "a+")
f_new.write(new_path)
f_new.close()
| 32
| 84
| 0.703804
|
f0c83b1c1795b799bf17d60b046b9de50a0af6eb
| 2,750
|
py
|
Python
|
build_vectors.py
|
alekseimi/code2vec_mag
|
49281e64c437066fd510787d7b3c4867a1b32a21
|
[
"MIT"
] | null | null | null |
build_vectors.py
|
alekseimi/code2vec_mag
|
49281e64c437066fd510787d7b3c4867a1b32a21
|
[
"MIT"
] | 4
|
2020-03-04T23:19:59.000Z
|
2022-02-10T00:15:38.000Z
|
build_vectors.py
|
alekseimi/code2vec_mag
|
49281e64c437066fd510787d7b3c4867a1b32a21
|
[
"MIT"
] | null | null | null |
from common import common
from extractor import Extractor
import os
import csv
SHOW_TOP_CONTEXTS = 2
MAX_PATH_LENGTH = 8
MAX_PATH_WIDTH = 2
JAR_PATH = 'JavaExtractor/JPredict/target/JavaExtractor-0.0.1-SNAPSHOT.jar'
# 1. Get the vector and the method name
# raw_prediction, method_prediction_results
# raw_prediction.original_name
# 2. Convert the method name of the form x|y|z
class VectorBuilder:
#
exit_keywords = ['exit', 'quit', 'q']
def __init__(self, config, model):
model.predict([])
self.model = model
self.config = config
self.path_extractor = Extractor(config,
jar_path=JAR_PATH,
max_path_length=MAX_PATH_LENGTH,
max_path_width=MAX_PATH_WIDTH)
def iterate_over_directory(self, rootdir, project_name):
dir_list = []
full_list = []
for subdir, dirs, files in os.walk(rootdir):
for file in files:
# print os.path.join(subdir, file)
filepath = subdir + os.sep + file
if filepath.endswith(".java"):
generated_list = self.generate_code_vectors(filepath, project_name)
if generated_list is None:
continue
print(filepath + "list size: " + str(len(generated_list)))
full_list.extend(generated_list)
dir_list.append(filepath)
self.dump_dirlist_to_csv(dir_list, project_name)
return full_list
def dump_dirlist_to_csv(self, dirlist, project_name):
with open(project_name+'.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(dirlist)
return True
def generate_code_vectors(self, filename, project_name):
try:
predict_lines, hash_to_string_dict = self.path_extractor.extract_paths(filename)
except ValueError as e:
return None
raw_prediction_results = self.model.predict(predict_lines)
method_prediction_results = common.parse_prediction_results(
raw_prediction_results, hash_to_string_dict,
self.model.vocabs.target_vocab.special_words, topk=SHOW_TOP_CONTEXTS)
item_list = []
for raw_prediction, method_prediction in zip(raw_prediction_results, method_prediction_results):
#values_list = [project_name, raw_prediction.original_name]
#full_values = values_list + raw_prediction.code_vector.tolist()
#item_list.append(full_values)
item_list.append(raw_prediction.code_vector.tolist())
return item_list
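# --- Added illustrative usage (hedged sketch; not part of the original script) ---
# VectorBuilder expects an already-loaded code2vec config and model; how those
# are constructed lives outside this file, so the names below are assumptions:
#
#     builder = VectorBuilder(config, model)
#     vectors = builder.iterate_over_directory('/path/to/java/project', 'my_project')
#     # 'vectors' is a flat list of code vectors, one per extracted method,
#     # and 'my_project.csv' lists the .java files that were processed.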
| 39.285714
| 104
| 0.630909
|
480955c3b68ec4c9d8e9c1b28f061222a212b819
| 968
|
py
|
Python
|
statbus/models/util.py
|
psykzz/statbus
|
67fb9ed487b45cdab36b61e7d037952a13fff492
|
[
"MIT"
] | null | null | null |
statbus/models/util.py
|
psykzz/statbus
|
67fb9ed487b45cdab36b61e7d037952a13fff492
|
[
"MIT"
] | null | null | null |
statbus/models/util.py
|
psykzz/statbus
|
67fb9ed487b45cdab36b61e7d037952a13fff492
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import Callable, Any
from peewee import *
from statbus.utils.FlaskDB import FlaskDBWrapper as FlaskDB
db_wrapper = FlaskDB()
class DBModel(db_wrapper.Model):
"""Meta model all models should parent from"""
pass
""" Enum field hack
This isn't supported by peewee but easy enough to add
"""
class EnumField(CharField):
"""
This class enable an Enum like field for Peewee
"""
def __init__(self, choices: Callable, *args: Any, **kwargs: Any) -> None:
super(CharField, self).__init__(*args, **kwargs)
self.choices = choices
self.max_length = 255
def db_value(self, value: Any) -> Any:
return value.value
def python_value(self, value: Any) -> Any:
return self.choices(type(list(self.choices)[0].value)(value))
class KeyTypeEnum(Enum):
TEXT = "text"
AMOUNT = "amount"
TALLY = "tally"
NESTED = "nested tally"
ASSOCIATIVE = "associative"
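# --- Added illustrative usage (hedged sketch; not part of the original module) ---
# EnumField stores the enum member's value in a CharField column and converts
# it back to the enum on read. A hypothetical model using it might look like:
#
#     class Statistic(DBModel):
#         name = CharField()
#         key_type = EnumField(choices=KeyTypeEnum, default=KeyTypeEnum.TEXT)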
| 22
| 77
| 0.663223
|
6d7bcf35d1948be5683e503a1e54a003104650fd
| 1,180
|
py
|
Python
|
dummy_package/subpackage2/submoduleB.py
|
b1quint/testing_rtd
|
4745a223eebbf182ff217d05615298f88453b085
|
[
"MIT"
] | null | null | null |
dummy_package/subpackage2/submoduleB.py
|
b1quint/testing_rtd
|
4745a223eebbf182ff217d05615298f88453b085
|
[
"MIT"
] | null | null | null |
dummy_package/subpackage2/submoduleB.py
|
b1quint/testing_rtd
|
4745a223eebbf182ff217d05615298f88453b085
|
[
"MIT"
] | 1
|
2019-01-16T13:40:38.000Z
|
2019-01-16T13:40:38.000Z
|
"""
This is a submodule living inside subpackage2.
"""
import os
def my_other_method1(p):
"""
This is the docstring used in subpackage2. It has nothing to do with the `dummy_package.greet_people` module.
Parameters
----------
p : str
I have no idea why this should be a string.
"""
return p * 5
def my_other_method2(o):
"""
This is anoooother docstring of a method living inside subpackage2. Why? Just because I wanted to have more than one.
Parameters
----------
o : int
Note that this parameter should be a string because it wanted to be different.
Returns
-------
Five times 'o'
"""
return 5 * o
class MyOtherClass:
"""
A dummy class with a minimal constructor.
"""
def __init__(self):
self.my_attribute = None
def public_method(self, an_int_number):
"""
And I am doing all this to simulate a complete package. It seems boring, but it helps me understand things better.
Parameters
----------
an_int_number : bool
This is a bad variable name
"""
self.my_attribute = an_int_number
| 21.454545
| 117
| 0.590678
|
1604bc534278e67327c499859427504aa3818a78
| 114,693
|
py
|
Python
|
source/list_future_reminders/list_future_reminders.py
|
peterlynch/MoneydancePythonScripts
|
ecadd7bf5e4197e8e457ee6a9ba13bc586fd4346
|
[
"MIT"
] | null | null | null |
source/list_future_reminders/list_future_reminders.py
|
peterlynch/MoneydancePythonScripts
|
ecadd7bf5e4197e8e457ee6a9ba13bc586fd4346
|
[
"MIT"
] | null | null | null |
source/list_future_reminders/list_future_reminders.py
|
peterlynch/MoneydancePythonScripts
|
ecadd7bf5e4197e8e457ee6a9ba13bc586fd4346
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# list_future_reminders.py (build: 1004)
###############################################################################
# MIT License
#
# Copyright (c) 2020 Stuart Beesley - StuWareSoftSystems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# Build: 1000 - Initial release - cribbed from extract_reminders_csv (for @CanSaver)
# Build: 1001 - Enhancement to prevent duplicate extension running.....
# Build: 1002 - Tweak to block old MD versions...
# Build: 1003 - tweak to common code for launch detection
# Build: 1004 - tweak to common code (minor, non-functional change)
# Displays Moneydance future reminders
# Detect another instance of this code running in the same namespace - i.e. a Moneydance Extension
# CUSTOMIZE AND COPY THIS ##############################################################################################
# CUSTOMIZE AND COPY THIS ##############################################################################################
# CUSTOMIZE AND COPY THIS ##############################################################################################
myModuleID = u"list_future_reminders"
global list_future_reminders_frame_
global moneydance, moneydance_data, moneydance_ui
global moneydance_extension_loader
from java.lang import System
from javax.swing import JFrame
from java.awt.event import WindowEvent
class MyJFrame(JFrame):
def __init__(self, frameTitle=None):
super(JFrame, self).__init__(frameTitle)
self.myJFrameVersion = 2
self.isActiveInMoneydance = False
self.isRunTimeExtension = False
self.MoneydanceAppListener = None
self.HomePageViewObj = None
def getMyJFrame( moduleName ):
try:
frames = JFrame.getFrames()
for fr in frames:
if (fr.getName().lower().startswith(u"%s_main" %moduleName)
and type(fr).__name__ == MyJFrame.__name__ # isinstance() won't work across namespaces
and fr.isActiveInMoneydance):
print("%s: Found live frame: %s (MyJFrame() version: %s)" %(myModuleID,fr.getName(),fr.myJFrameVersion))
System.err.write("%s: Found live frame: %s (MyJFrame() version: %s)\n" %(myModuleID, fr.getName(),fr.myJFrameVersion))
if fr.isRunTimeExtension: print("%s: This extension is a run-time self-installed extension too..." %(myModuleID))
if fr.isRunTimeExtension: System.err.write("%s: This extension is a run-time self-installed extension too...\n" %(myModuleID))
return fr
except:
System.err.write("%s: Critical error in getMyJFrame(); caught and ignoring...!\n" %(myModuleID))
return None
frameToResurrect = None
try:
if (u"%s_frame_"%myModuleID in globals()
and isinstance(list_future_reminders_frame_, MyJFrame)
and list_future_reminders_frame_.isActiveInMoneydance):
frameToResurrect = list_future_reminders_frame_
else:
getFr = getMyJFrame( myModuleID )
if getFr is not None:
frameToResurrect = getFr
del getFr
except:
System.err.write("%s: Critical error checking frameToResurrect(1); caught and ignoring...!\n" %(myModuleID))
lTerminatedExtension = False
try:
if frameToResurrect: # and it's still alive.....
if frameToResurrect.isRunTimeExtension: # this must be an install/reinstall. I need to deactivate and re-register extension...
print("%s: Detected that runtime extension %s is already running..... Assuming a re-installation... Taking appropriate action..." %(myModuleID, myModuleID))
System.err.write("%s: Detected that runtime extension %s is already running..... Assuming a re-installation... Taking appropriate action...\n" %(myModuleID, myModuleID))
frameToResurrect.isActiveInMoneydance = False
try:
frameToResurrect.setVisible(False)
frameToResurrect.dispatchEvent(WindowEvent(frameToResurrect, WindowEvent.WINDOW_CLOSING))
System.err.write("%s: Pushed a windowClosing event to existing extension... Hopefully it will close to allow re-installation...\n" %(myModuleID))
except:
System.err.write("%s: ERROR pushing a windowClosing event to existing extension!\n" %(myModuleID))
lTerminatedExtension = True
frameToResurrect = None
else:
print("%s: Detected that %s is already running..... Attempting to resurrect.." %(myModuleID, myModuleID))
System.err.write("%s: Detected that %s is already running..... Attempting to resurrect..\n" %(myModuleID, myModuleID))
except:
System.err.write("%s: Critical error checking frameToResurrect(2); caught and ignoring...!\n" %(myModuleID))
if float(moneydance.getBuild()) < 1904: # Check for builds less than 1904 / version < 2019.4
try:
moneydance.getUI().showInfoMessage("SORRY YOUR VERSION IS TOO OLD FOR THIS SCRIPT/EXTENSION")
except:
raise Exception("SORRY YOUR MONEYDANCE VERSION IS TOO OLD FOR THIS SCRIPT/EXTENSION")
elif frameToResurrect:
try:
frameToResurrect.setVisible(True)
if frameToResurrect.getExtendedState() == JFrame.ICONIFIED:
frameToResurrect.setExtendedState(JFrame.NORMAL)
frameToResurrect.toFront()
except:
print("%s: Failed to resurrect main Frame.. This duplicate Script/extension is now terminating....." %(myModuleID))
System.err.write("%s: Failed to resurrect main Frame.. This duplicate Script/extension is now terminating.....\n" %(myModuleID))
raise Exception("SORRY - YOU CAN ONLY HAVE ONE INSTANCE OF %s RUNNING AT ONCE" %(myModuleID.upper()))
else:
del frameToResurrect
if not lTerminatedExtension:
print("%s: No other 'live' instances of this program detected (or I terminated it) - running as normal" %(myModuleID))
System.err.write("%s: No other instances of this program detected (or I terminated it) - running as normal\n" %(myModuleID))
else:
print("%s: I terminated extension in memory, running script to allow new installation..." %(myModuleID))
System.err.write("%s: I terminated extension in memory, running script to allow new installation...\n" %(myModuleID))
# COMMON IMPORTS #######################################################################################################
# COMMON IMPORTS #######################################################################################################
# COMMON IMPORTS #######################################################################################################
import sys
reload(sys) # Dirty hack to eliminate UTF-8 coding errors
sys.setdefaultencoding('utf8') # Dirty hack to eliminate UTF-8 coding errors. Without this str() fails on unicode strings...
import os
import os.path
import codecs
import inspect
import pickle
import platform
import csv
import datetime
from org.python.core.util import FileUtil
from com.moneydance.util import Platform
from com.moneydance.awt import JTextPanel, GridC, JDateField
from com.moneydance.apps.md.view.gui import MDImages
from com.infinitekind.util import DateUtil, CustomDateFormat
from com.infinitekind.moneydance.model import *
from com.infinitekind.moneydance.model import AccountUtil, AcctFilter, CurrencyType, CurrencyUtil
from com.infinitekind.moneydance.model import Account, Reminder, ParentTxn, SplitTxn, TxnSearch, InvestUtil, TxnUtil
from javax.swing import JButton, JScrollPane, WindowConstants, JLabel, JPanel, JComponent, KeyStroke, JDialog, JComboBox
from javax.swing import JOptionPane, JTextArea, JMenuBar, JMenu, JMenuItem, AbstractAction, JCheckBoxMenuItem, JFileChooser
from javax.swing import JTextField, JPasswordField, Box, UIManager, JTable, JCheckBox, JRadioButton, ButtonGroup
from javax.swing.text import PlainDocument
from javax.swing.border import EmptyBorder
from java.awt import Color, Dimension, FileDialog, FlowLayout, Toolkit, Font, GridBagLayout, GridLayout
from java.awt import BorderLayout, Dialog, Insets
from java.awt.event import KeyEvent, WindowAdapter, InputEvent
from java.util import Date
from java.text import DecimalFormat, SimpleDateFormat
from java.util import Calendar, ArrayList
from java.lang import Double, Math, Character
from java.io import FileNotFoundException, FilenameFilter, File, FileInputStream, FileOutputStream, IOException, StringReader
from java.io import BufferedReader, InputStreamReader
if isinstance(None, (JDateField,CurrencyUtil,Reminder,ParentTxn,SplitTxn,TxnSearch, JComboBox, JCheckBox,
JTextArea, JMenuBar, JMenu, JMenuItem, JCheckBoxMenuItem, JFileChooser, JDialog,
JButton, FlowLayout, InputEvent, ArrayList, File, IOException, StringReader, BufferedReader,
InputStreamReader, Dialog, JTable, BorderLayout, Double, InvestUtil, JRadioButton, ButtonGroup,
AccountUtil, AcctFilter, CurrencyType, Account, TxnUtil, JScrollPane, WindowConstants, JFrame,
JComponent, KeyStroke, AbstractAction, UIManager, Color, Dimension, Toolkit, KeyEvent,
WindowAdapter, CustomDateFormat, SimpleDateFormat, Insets, FileDialog)): pass
if codecs.BOM_UTF8 is not None: pass
if csv.QUOTE_ALL is not None: pass
if datetime.MINYEAR is not None: pass
if Math.max(1,1): pass
# END COMMON IMPORTS ###################################################################################################
# COMMON GLOBALS #######################################################################################################
global debug # Set to True if you want verbose messages, else set to False....
global myParameters, myScriptName, version_build, _resetParameters, i_am_an_extension_so_run_headless, moneydanceIcon
global lPickle_version_warning, decimalCharSep, groupingCharSep, lIamAMac, lGlobalErrorDetected
global MYPYTHON_DOWNLOAD_URL
# END COMMON GLOBALS ###################################################################################################
# SET THESE VARIABLES FOR ALL SCRIPTS ##################################################################################
version_build = "1004" # noqa
myScriptName = u"%s.py(Extension)" %myModuleID # noqa
debug = False # noqa
myParameters = {} # noqa
_resetParameters = False # noqa
lPickle_version_warning = False # noqa
lIamAMac = False # noqa
lGlobalErrorDetected = False # noqa
MYPYTHON_DOWNLOAD_URL = "https://yogi1967.github.io/MoneydancePythonScripts/" # noqa
# END SET THESE VARIABLES FOR ALL SCRIPTS ##############################################################################
# >>> THIS SCRIPT'S IMPORTS ############################################################################################
from com.moneydance.apps.md.view.gui import EditRemindersWindow
from java.awt.event import MouseAdapter
from java.util import Comparator
from javax.swing import SortOrder, ListSelectionModel
from javax.swing.table import DefaultTableCellRenderer, DefaultTableModel, TableRowSorter
from javax.swing.border import CompoundBorder, MatteBorder
from javax.swing.event import TableColumnModelListener
from java.lang import String, Number
from com.infinitekind.util import StringUtils
from java.awt.event import WindowEvent
from java.lang import Runnable, Thread
from com.moneydance.apps.md.controller import AppEventListener
# >>> END THIS SCRIPT'S IMPORTS ########################################################################################
# >>> THIS SCRIPT'S GLOBALS ############################################################################################
# Saved to parameters file
global __list_future_reminders
global userdateformat, lStripASCII, csvDelimiter, _column_widths_LFR, scriptpath, daysToLookForward_LFR
global lWriteBOMToExportFile_SWSS
# Other used by this program
global csvfilename, lDisplayOnly
global baseCurrency, sdf, csvlines, csvheaderline, headerFormats
global table, focus, row, scrollpane, EditedReminderCheck, ReminderTable_Count, ExtractDetails_Count
global saveStatusLabel
# >>> END THIS SCRIPT'S GLOBALS ############################################################################################
# Set programmatic defaults/parameters for filters HERE.... Saved Parameters will override these now
# NOTE: You can override in the pop-up screen
userdateformat = "%Y/%m/%d" # noqa
lStripASCII = False # noqa
csvDelimiter = "," # noqa
scriptpath = "" # noqa
_column_widths_LFR = [] # noqa
daysToLookForward_LFR = 365 # noqa
lWriteBOMToExportFile_SWSS = True # noqa
extract_filename="%s.csv" %(myModuleID)
# >>> END THIS SCRIPT'S GLOBALS ############################################################################################
# COMMON CODE ##########################################################################################################
i_am_an_extension_so_run_headless = False # noqa
try:
myScriptName = os.path.basename(__file__)
except:
i_am_an_extension_so_run_headless = True # noqa
scriptExit = """
----------------------------------------------------------------------------------------------------------------------
Thank you for using %s! The author has other useful Extensions / Moneybot Python scripts available...:
Extension (.mxt) format only:
toolbox View Moneydance settings, diagnostics, fix issues, change settings and much more
Extension (.mxt) and Script (.py) Versions available:
extract_data Extract various data to screen and/or csv.. Consolidation of:
- stockglance2020 View summary of Securities/Stocks on screen, total by Security, export to csv
- extract_reminders_csv View reminders on screen, edit if required, extract all to csv
- extract_currency_history_csv Extract currency history to csv
- extract_investment_transactions_csv Extract investment transactions to csv
- extract_account_registers_csv Extract Account Register(s) to csv along with any attachments
list_future_reminders: View future reminders on screen. Allows you to set the days to look forward
A collection of useful ad-hoc scripts (zip file)
useful_scripts: Just unzip and select the script you want for the task at hand...
Visit: %s (Author's site)
----------------------------------------------------------------------------------------------------------------------
""" %(myScriptName, MYPYTHON_DOWNLOAD_URL)
# P=Display on Python Console, J=Display on MD (Java) Console Error Log, B=Both, D=If Debug Only print, DB=print both
def myPrint(where, *args):
global myScriptName, debug, i_am_an_extension_so_run_headless
if where[0] == "D" and not debug: return
printString = ""
for what in args:
printString += "%s " %what
printString = printString.strip()
if where == "P" or where == "B" or where[0] == "D":
if not i_am_an_extension_so_run_headless:
try:
print(printString)
except:
print("Error writing to screen...")
dump_sys_error_to_md_console_and_errorlog()
if where == "J" or where == "B" or where == "DB":
dt = datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
try:
System.err.write(myScriptName + ":" + dt + ": ")
System.err.write(printString)
System.err.write("\n")
except:
System.err.write(myScriptName + ":" + dt + ": "+"Error writing to console")
dump_sys_error_to_md_console_and_errorlog()
return
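# Added note (illustrative only): myPrint("B", "message") writes to both the
# Python console and the MD error log; myPrint("DB", "message") does the same
# but only when the global debug flag is True.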
def dump_sys_error_to_md_console_and_errorlog( lReturnText=False ):
theText = ""
myPrint("B","Unexpected error caught: %s" %(sys.exc_info()[0]))
myPrint("B","Unexpected error caught: %s" %(sys.exc_info()[1]))
myPrint("B","Error on Script Line Number: %s" %(sys.exc_info()[2].tb_lineno))
if lReturnText:
theText += "\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
theText += "Unexpected error caught: %s\n" %(sys.exc_info()[0])
theText += "Unexpected error caught: %s\n" %(sys.exc_info()[1])
theText += "Error on Script Line Number: %s\n" %(sys.exc_info()[2].tb_lineno)
theText += "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"
return theText
return
def pad(theText, theLength):
theText = theText[:theLength].ljust(theLength, u" ")
return theText
def rpad(theText, theLength):
if not (isinstance(theText, unicode) or isinstance(theText, str)):
theText = str(theText)
theText = theText[:theLength].rjust(theLength, u" ")
return theText
def cpad(theText, theLength):
if not (isinstance(theText, unicode) or isinstance(theText, str)):
theText = str(theText)
if len(theText)>=theLength: return theText[:theLength]
padLength = int((theLength - len(theText)) / 2)
theText = theText[:theLength]
theText = ((" "*padLength)+theText+(" "*padLength))[:theLength]
return theText
myPrint("B", myScriptName, ": Python Script Initialising.......", "Build:", version_build)
def is_moneydance_loaded_properly():
global debug
if debug or moneydance is None or moneydance_data is None or moneydance_ui is None:
for theClass in ["moneydance", moneydance], ["moneydance_ui",moneydance_ui], ["moneydance_data",moneydance]:
myPrint("B","Moneydance Objects now....: Class: %s %s@{:x}".format(System.identityHashCode(theClass[1])) %(pad(theClass[0],20), theClass[1].__class__))
myPrint("P","")
if moneydance is not None and moneydance_data is not None and moneydance_ui is not None: # noqa
if debug: myPrint("B","Success - Moneydance variables are already set....")
return
myPrint("B","ERROR - Moneydance variables are NOT set properly....!")
# to cope with being run as Extension.... temporary
if moneydance is not None and (moneydance_data is None or moneydance_ui is None): # noqa
myPrint("B", "@@@ Moneydance variables not set (run as extension?) - attempting to manually set @@@")
try:
exec "global moneydance_ui;" + "moneydance_ui=moneydance.getUI();"
except:
myPrint("B","Failed to set moneydance_ui... This is a critical failure... (perhaps a run-time extension and too early - will continue)!")
# raise
try:
exec "global moneydance_data;" + "moneydance_data=moneydance.getCurrentAccount().getBook();"
except:
myPrint("B","Failed to set moneydance_data... I expect I am executing at MD runtime to self-install as a FeatureModule extension.. no matter...")
for theClass in ["moneydance",moneydance], ["moneydance_ui",moneydance_ui], ["moneydance_data",moneydance]:
myPrint("B","Moneydance Objects after manual setting....: Class: %s %s@{:x}".format(System.identityHashCode(theClass[1])) %(pad(theClass[0],20), theClass[1].__class__))
myPrint("P","")
return
is_moneydance_loaded_properly()
def getMonoFont():
global debug
try:
theFont = moneydance.getUI().getFonts().code
# if debug: myPrint("B","Success setting Font set to Moneydance code: %s" %theFont)
except:
theFont = Font("monospaced", Font.PLAIN, 15)
if debug: myPrint("B","Failed to Font set to Moneydance code - So using: %s" %theFont)
return theFont
def getTheSetting(what):
x = moneydance.getPreferences().getSetting(what, None)
if not x or x == u"": return None
return what + u": %s" %(x)
def get_home_dir():
homeDir = None
# noinspection PyBroadException
try:
if Platform.isOSX():
homeDir = System.getProperty(u"UserHome") # On a Mac in a Java VM, the homedir is hidden
else:
# homeDir = System.getProperty("user.home")
homeDir = os.path.expanduser(u"~") # Should work on Unix and Windows
if homeDir is None or homeDir == u"":
homeDir = System.getProperty(u"user.home")
if homeDir is None or homeDir == u"":
homeDir = os.environ.get(u"HOMEPATH")
except:
pass
if not homeDir: homeDir = u"?"
return homeDir
def getDecimalPoint(lGetPoint=False, lGetGrouping=False):
global debug
decimalFormat = DecimalFormat.getInstance()
# noinspection PyUnresolvedReferences
decimalSymbols = decimalFormat.getDecimalFormatSymbols()
if not lGetGrouping: lGetPoint = True
if lGetGrouping and lGetPoint: return u"error"
try:
if lGetPoint:
_decimalCharSep = decimalSymbols.getDecimalSeparator()
myPrint(u"D",u"Decimal Point Character: %s" %(_decimalCharSep))
return _decimalCharSep
if lGetGrouping:
_groupingCharSep = decimalSymbols.getGroupingSeparator()
if _groupingCharSep is None or _groupingCharSep == u"":
myPrint(u"B", u"Caught empty Grouping Separator")
return u""
if ord(_groupingCharSep) >= 128: # Probably a nbsp (160) = e.g. South Africa for example..!
myPrint(u"B", u"Caught special character in Grouping Separator. Ord(%s)" %(ord(_groupingCharSep)))
if ord(_groupingCharSep) == 160:
return u" (non breaking space character)"
return u" (non printable character)"
myPrint(u"D",u"Grouping Separator Character:", _groupingCharSep)
return _groupingCharSep
except:
myPrint(u"B",u"Error in getDecimalPoint() routine....?")
dump_sys_error_to_md_console_and_errorlog()
return u"error"
decimalCharSep = getDecimalPoint(lGetPoint=True)
groupingCharSep = getDecimalPoint(lGetGrouping=True)
# JOptionPane.DEFAULT_OPTION, JOptionPane.YES_NO_OPTION, JOptionPane.YES_NO_CANCEL_OPTION, JOptionPane.OK_CANCEL_OPTION
# JOptionPane.ERROR_MESSAGE, JOptionPane.INFORMATION_MESSAGE, JOptionPane.WARNING_MESSAGE, JOptionPane.QUESTION_MESSAGE, JOptionPane.PLAIN_MESSAGE
# Copies Moneydance_ui.showInfoMessage
def myPopupInformationBox(theParent=None, theMessage="What no message?!", theTitle="Info", theMessageType=JOptionPane.INFORMATION_MESSAGE):
if theParent is None:
if theMessageType == JOptionPane.PLAIN_MESSAGE or theMessageType == JOptionPane.INFORMATION_MESSAGE:
icon_to_use=moneydance.getUI().getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png")
JOptionPane.showMessageDialog(theParent, JTextPanel(theMessage), theTitle, theMessageType, icon_to_use)
return
JOptionPane.showMessageDialog(theParent, JTextPanel(theMessage), theTitle, theMessageType)
return
def wrapLines(message, numChars=40):
charCount = 0
result=""
for ch in message:
if ch == '\n' or ch == '\r':
charCount = 0
elif charCount > numChars and not Character.isWhitespace(ch):
result+="\n"
charCount = 0
else:
charCount+=1
result+=ch
return result
def myPopupAskBackup(theParent=None, theMessage="What no message?!"):
_options=["STOP", "PROCEED WITHOUT BACKUP", "DO BACKUP NOW"]
response = JOptionPane.showOptionDialog(theParent,
theMessage,
"PERFORM BACKUP BEFORE UPDATE?",
0,
JOptionPane.WARNING_MESSAGE,
None,
_options,
_options[0])
if response == 2:
myPrint("B", "User requested to perform Export Backup before update/fix - calling moneydance export backup routine...")
moneydance.getUI().saveToBackup(None)
return True
elif response == 1:
myPrint("B", "User DECLINED to perform Export Backup before update/fix...!")
return True
return False
# Copied Moneydance_ui.askQuestion
def myPopupAskQuestion(theParent=None,
theTitle="Question",
theQuestion="What?",
theOptionType=JOptionPane.YES_NO_OPTION,
theMessageType=JOptionPane.QUESTION_MESSAGE):
icon_to_use = None
if theParent is None:
if theMessageType == JOptionPane.PLAIN_MESSAGE or theMessageType == JOptionPane.INFORMATION_MESSAGE:
icon_to_use=moneydance.getUI().getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png")
# question = wrapLines(theQuestion)
question = theQuestion
result = JOptionPane.showConfirmDialog(theParent,
question,
theTitle,
theOptionType,
theMessageType,
icon_to_use) # getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"))
return result == 0
# Copies Moneydance .askForQuestion
def myPopupAskForInput(theParent,
theTitle,
theFieldLabel,
theFieldDescription="",
defaultValue=None,
isPassword=False,
theMessageType=JOptionPane.INFORMATION_MESSAGE):
icon_to_use = None
if theParent is None:
if theMessageType == JOptionPane.PLAIN_MESSAGE or theMessageType == JOptionPane.INFORMATION_MESSAGE:
icon_to_use=moneydance.getUI().getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png")
p = JPanel(GridBagLayout())
defaultText = None
if defaultValue: defaultText = defaultValue
if isPassword:
field = JPasswordField(defaultText)
else:
field = JTextField(defaultText)
x = 0
if theFieldLabel:
p.add(JLabel(theFieldLabel), GridC.getc(x, 0).east())
x+=1
p.add(field, GridC.getc(x, 0).field())
p.add(Box.createHorizontalStrut(244), GridC.getc(x, 0))
if theFieldDescription:
p.add(JTextPanel(theFieldDescription), GridC.getc(x, 1).field().colspan(x + 1))
if (JOptionPane.showConfirmDialog(theParent,
p,
theTitle,
JOptionPane.OK_CANCEL_OPTION,
theMessageType,
icon_to_use) == 0):
return field.getText()
return None
# APPLICATION_MODAL, DOCUMENT_MODAL, MODELESS, TOOLKIT_MODAL
class MyPopUpDialogBox():
def __init__(self, theParent=None, theStatus="", theMessage="", theWidth=200, theTitle="Info", lModal=True, lCancelButton=False, OKButtonText="OK", lAlertLevel=0):
self.theParent = theParent
self.theStatus = theStatus
self.theMessage = theMessage
self.theWidth = max(80,theWidth)
self.theTitle = theTitle
self.lModal = lModal
self.lCancelButton = lCancelButton
self.OKButtonText = OKButtonText
self.lAlertLevel = lAlertLevel
self.fakeJFrame = None
self._popup_d = None
self.lResult = [None]
if not self.theMessage.endswith("\n"): self.theMessage+="\n"
if self.OKButtonText == "": self.OKButtonText="OK"
class WindowListener(WindowAdapter):
def __init__(self, theDialog, theFakeFrame, lResult):
self.theDialog = theDialog
self.theFakeFrame = theFakeFrame
self.lResult = lResult
def windowClosing(self, WindowEvent): # noqa
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "Event: ", WindowEvent)
myPrint("DB", "JDialog Frame shutting down....")
self.lResult[0] = False
if self.theFakeFrame is not None:
self.theDialog.dispose()
self.theFakeFrame.dispose()
else:
self.theDialog.dispose()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
class OKButtonAction(AbstractAction):
# noinspection PyMethodMayBeStatic
def __init__(self, theDialog, theFakeFrame, lResult):
self.theDialog = theDialog
self.theFakeFrame = theFakeFrame
self.lResult = lResult
def actionPerformed(self, event):
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "Event: ", event)
self.lResult[0] = True
if self.theFakeFrame is not None:
self.theDialog.dispose()
self.theFakeFrame.dispose()
else:
self.theDialog.dispose()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
class CancelButtonAction(AbstractAction):
# noinspection PyMethodMayBeStatic
def __init__(self, theDialog, theFakeFrame, lResult):
self.theDialog = theDialog
self.theFakeFrame = theFakeFrame
self.lResult = lResult
def actionPerformed(self, event):
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "Event: ", event)
self.lResult[0] = False
if self.theFakeFrame is not None:
self.theDialog.dispose()
self.theFakeFrame.dispose()
else:
self.theDialog.dispose()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
def kill(self):
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
self._popup_d.setVisible(False)
if self.fakeJFrame is not None:
self._popup_d.dispose()
self.fakeJFrame.dispose()
else:
self._popup_d.dispose()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
def result(self):
global debug
return self.lResult[0]
def go(self):
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
# Create a fake JFrame so we can set the Icons...
if self.theParent is None:
self.fakeJFrame = MyJFrame()
self.fakeJFrame.setName(u"%s_fake_dialog" %(myModuleID))
self.fakeJFrame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE)
self.fakeJFrame.setUndecorated(True)
self.fakeJFrame.setVisible( False )
if not Platform.isOSX():
self.fakeJFrame.setIconImage(MDImages.getImage(moneydance.getSourceInformation().getIconResource()))
if self.lModal:
# noinspection PyUnresolvedReferences
self._popup_d = JDialog(self.theParent, self.theTitle, Dialog.ModalityType.APPLICATION_MODAL)
else:
# noinspection PyUnresolvedReferences
self._popup_d = JDialog(self.theParent, self.theTitle, Dialog.ModalityType.MODELESS)
self._popup_d.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE)
shortcut = Toolkit.getDefaultToolkit().getMenuShortcutKeyMaskEx()
# Add standard CMD-W keystrokes etc to close window
self._popup_d.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_W, shortcut), "close-window")
self._popup_d.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_F4, shortcut), "close-window")
self._popup_d.getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW).put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), "close-window")
self._popup_d.getRootPane().getActionMap().put("close-window", self.CancelButtonAction(self._popup_d, self.fakeJFrame,self.lResult))
self._popup_d.addWindowListener(self.WindowListener(self._popup_d, self.fakeJFrame,self.lResult))
if (not Platform.isMac()):
# moneydance_ui.getImages()
self._popup_d.setIconImage(MDImages.getImage(moneydance.getSourceInformation().getIconResource()))
displayJText = JTextArea(self.theMessage)
displayJText.setFont( getMonoFont() )
displayJText.setEditable(False)
displayJText.setLineWrap(False)
displayJText.setWrapStyleWord(False)
_popupPanel=JPanel()
# maxHeight = 500
_popupPanel.setLayout(GridLayout(0,1))
_popupPanel.setBorder(EmptyBorder(8, 8, 8, 8))
# _popupPanel.setMinimumSize(Dimension(self.theWidth, 0))
# _popupPanel.setMaximumSize(Dimension(self.theWidth, maxHeight))
if self.theStatus:
_label1 = JLabel(pad(self.theStatus,self.theWidth-20))
_label1.setForeground(Color.BLUE)
_popupPanel.add(_label1)
myScrollPane = JScrollPane(displayJText, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED,JScrollPane.HORIZONTAL_SCROLLBAR_AS_NEEDED)
if displayJText.getLineCount()>5:
# myScrollPane.setMinimumSize(Dimension(self.theWidth-20, 10))
# myScrollPane.setMaximumSize(Dimension(self.theWidth-20, maxHeight-100))
myScrollPane.setWheelScrollingEnabled(True)
_popupPanel.add(myScrollPane)
else:
_popupPanel.add(displayJText)
buttonPanel = JPanel()
if self.lModal or self.lCancelButton:
buttonPanel.setLayout(FlowLayout(FlowLayout.CENTER))
if self.lCancelButton:
cancel_button = JButton("CANCEL")
cancel_button.setPreferredSize(Dimension(100,40))
cancel_button.setBackground(Color.LIGHT_GRAY)
cancel_button.setBorderPainted(False)
cancel_button.setOpaque(True)
cancel_button.addActionListener( self.CancelButtonAction(self._popup_d, self.fakeJFrame,self.lResult) )
buttonPanel.add(cancel_button)
if self.lModal:
ok_button = JButton(self.OKButtonText)
if len(self.OKButtonText) <= 2:
ok_button.setPreferredSize(Dimension(100,40))
else:
ok_button.setPreferredSize(Dimension(200,40))
ok_button.setBackground(Color.LIGHT_GRAY)
ok_button.setBorderPainted(False)
ok_button.setOpaque(True)
ok_button.addActionListener( self.OKButtonAction(self._popup_d, self.fakeJFrame, self.lResult) )
buttonPanel.add(ok_button)
_popupPanel.add(buttonPanel)
if self.lAlertLevel>=2:
# internalScrollPane.setBackground(Color.RED)
# theJText.setBackground(Color.RED)
# theJText.setForeground(Color.BLACK)
displayJText.setBackground(Color.RED)
displayJText.setForeground(Color.BLACK)
_popupPanel.setBackground(Color.RED)
_popupPanel.setForeground(Color.BLACK)
buttonPanel.setBackground(Color.RED)
myScrollPane.setBackground(Color.RED)
elif self.lAlertLevel>=1:
# internalScrollPane.setBackground(Color.YELLOW)
# theJText.setBackground(Color.YELLOW)
# theJText.setForeground(Color.BLACK)
displayJText.setBackground(Color.YELLOW)
displayJText.setForeground(Color.BLACK)
_popupPanel.setBackground(Color.YELLOW)
_popupPanel.setForeground(Color.BLACK)
buttonPanel.setBackground(Color.YELLOW)
myScrollPane.setBackground(Color.RED)
self._popup_d.add(_popupPanel)
self._popup_d.pack()
self._popup_d.setLocationRelativeTo(None)
self._popup_d.setVisible(True)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return self.lResult[0]
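# Added note (illustrative only): a typical call might be
#     MyPopUpDialogBox(None, theStatus="Warning", theMessage="Something to review...",
#                      theTitle="list_future_reminders", lModal=True, lAlertLevel=1).go()
# which blocks until OK/Cancel and returns True when OK was pressed.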
def play_the_money_sound():
# Seems to cause a crash on Virtual Machine with no Audio - so just in case....
try:
moneydance.getUI().getSounds().playSound("cash_register.wav")
except:
pass
return
def get_filename_addition():
cal = Calendar.getInstance()
hhmm = str(10000 + cal.get(11) * 100 + cal.get(12))[1:]
nameAddition = "-" + str(DateUtil.getStrippedDateInt()) + "-"+hhmm
return nameAddition
def check_file_writable(fnm):
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )
myPrint("DB","Checking path: ", fnm)
if os.path.exists(fnm):
myPrint("DB", "path exists..")
# path exists
if os.path.isfile(fnm): # is it a file or a dir?
myPrint("DB","path is a file..")
# also works when file is a link and the target is writable
return os.access(fnm, os.W_OK)
else:
myPrint("DB", "path is not a file..")
return False # path is a dir, so cannot write as a file
# target does not exist, check perms on parent dir
myPrint("DB","path does not exist...")
pdir = os.path.dirname(fnm)
if not pdir: pdir = '.'
# target is creatable if parent dir is writable
return os.access(pdir, os.W_OK)
class ExtFilenameFilter(FilenameFilter):
ext = ""
def __init__(self, ext):
self.ext = "." + ext.upper()
def accept(self, thedir, filename): # noqa
if filename is not None and filename.upper().endswith(self.ext):
return True
return False
try:
moneydanceIcon = MDImages.getImage(moneydance.getSourceInformation().getIconResource())
except:
moneydanceIcon = None
def MDDiag():
global debug
myPrint("D", "Moneydance Build:", moneydance.getVersion(), "Build:", moneydance.getBuild())
MDDiag()
myPrint("DB","System file encoding is:", sys.getfilesystemencoding() ) # Not used, but interesting. Perhaps useful when switching between Windows/Macs and writing files...
def checkVersions():
global debug
lError = False
plat_j = platform.system()
plat_p = platform.python_implementation()
python_maj = sys.version_info.major
python_min = sys.version_info.minor
myPrint("DB","Platform:", plat_p, plat_j, python_maj, ".", python_min)
myPrint("DB", sys.version)
if plat_p != "Jython":
lError = True
myPrint("DB", "Error: Script requires Jython")
if plat_j != "Java":
lError = True
myPrint("DB", "Error: Script requires Java base")
if (python_maj != 2 or python_min != 7):
lError = True
myPrint("DB", "\n\nError: Script was designed on version 2.7. By all means bypass this test and see what happens.....")
if lError:
myPrint("J", "Platform version issue - will terminate script!")
myPrint("P", "\n@@@ TERMINATING PROGRAM @@@\n")
raise(Exception("Platform version issue - will terminate script!"))
return not lError
checkVersions()
def setDefaultFonts():
myFont = moneydance.getUI().getFonts().defaultText
if myFont.getSize()>18:
try:
myFont = myFont.deriveFont(16.0)
myPrint("B", "I have reduced the font size down to point-size 16 - Default Fonts are now set to: %s" %(myFont))
except:
myPrint("B","ERROR - failed to override font point size down to 16.... will ignore and continue. Font set to: %s" %(myFont))
else:
myPrint("DB", "Attempting to set default font to %s" %myFont)
try:
UIManager.getLookAndFeelDefaults().put("defaultFont", myFont )
# https://thebadprogrammer.com/swing-uimanager-keys/
UIManager.put("CheckBoxMenuItem.acceleratorFont", myFont)
UIManager.put("Button.font", myFont)
UIManager.put("ToggleButton.font", myFont)
UIManager.put("RadioButton.font", myFont)
UIManager.put("CheckBox.font", myFont)
UIManager.put("ColorChooser.font", myFont)
UIManager.put("ComboBox.font", myFont)
UIManager.put("Label.font", myFont)
UIManager.put("List.font", myFont)
UIManager.put("MenuBar.font", myFont)
UIManager.put("Menu.acceleratorFont", myFont)
UIManager.put("RadioButtonMenuItem.acceleratorFont", myFont)
UIManager.put("MenuItem.acceleratorFont", myFont)
UIManager.put("MenuItem.font", myFont)
UIManager.put("RadioButtonMenuItem.font", myFont)
UIManager.put("CheckBoxMenuItem.font", myFont)
UIManager.put("OptionPane.buttonFont", myFont)
UIManager.put("OptionPane.messageFont", myFont)
UIManager.put("Menu.font", myFont)
UIManager.put("PopupMenu.font", myFont)
UIManager.put("OptionPane.font", myFont)
UIManager.put("Panel.font", myFont)
UIManager.put("ProgressBar.font", myFont)
UIManager.put("ScrollPane.font", myFont)
UIManager.put("Viewport.font", myFont)
UIManager.put("TabbedPane.font", myFont)
UIManager.put("Slider.font", myFont)
UIManager.put("Table.font", myFont)
UIManager.put("TableHeader.font", myFont)
UIManager.put("TextField.font", myFont)
UIManager.put("Spinner.font", myFont)
UIManager.put("PasswordField.font", myFont)
UIManager.put("TextArea.font", myFont)
UIManager.put("TextPane.font", myFont)
UIManager.put("EditorPane.font", myFont)
UIManager.put("TabbedPane.smallFont", myFont)
UIManager.put("TitledBorder.font", myFont)
UIManager.put("ToolBar.font", myFont)
UIManager.put("ToolTip.font", myFont)
UIManager.put("Tree.font", myFont)
UIManager.put("FormattedTextField.font", myFont)
UIManager.put("IconButton.font", myFont)
UIManager.put("InternalFrame.optionDialogTitleFont", myFont)
UIManager.put("InternalFrame.paletteTitleFont", myFont)
UIManager.put("InternalFrame.titleFont", myFont)
except:
myPrint("B","Failed to set Swing default fonts to use Moneydance defaults... sorry")
return
if moneydance_ui is not None:
setDefaultFonts()
def who_am_i():
try:
username = System.getProperty("user.name")
except:
username = "???"
return username
def getHomeDir():
# Yup - this can be all over the place...
myPrint("D", 'System.getProperty("user.dir")', System.getProperty("user.dir"))
myPrint("D", 'System.getProperty("UserHome")', System.getProperty("UserHome"))
myPrint("D", 'System.getProperty("user.home")', System.getProperty("user.home"))
myPrint("D", 'os.path.expanduser("~")', os.path.expanduser("~"))
myPrint("D", 'os.environ.get("HOMEPATH")', os.environ.get("HOMEPATH"))
return
def amIaMac():
return Platform.isOSX()
myPrint("D", "I am user:", who_am_i())
if debug: getHomeDir()
lIamAMac = amIaMac()
def myDir():
global lIamAMac
homeDir = None
try:
if lIamAMac:
homeDir = System.getProperty("UserHome") # On a Mac in a Java VM, the homedir is hidden
else:
# homeDir = System.getProperty("user.home")
homeDir = os.path.expanduser("~") # Should work on Unix and Windows
if homeDir is None or homeDir == "":
homeDir = System.getProperty("user.home")
if homeDir is None or homeDir == "":
homeDir = os.environ.get("HOMEPATH")
except:
pass
if homeDir is None or homeDir == "":
homeDir = moneydance.getCurrentAccountBook().getRootFolder().getParent() # Better than nothing!
myPrint("DB", "Home Directory selected...:", homeDir)
if homeDir is None: return ""
return homeDir
# noinspection PyArgumentList
class JTextFieldLimitYN(PlainDocument):
limit = 10 # Default
toUpper = False
what = ""
def __init__(self, limit, toUpper, what):
super(PlainDocument, self).__init__()
self.limit = limit
self.toUpper = toUpper
self.what = what
def insertString(self, myOffset, myString, myAttr):
if (myString is None): return
if self.toUpper: myString = myString.upper()
if (self.what == "YN" and (myString in "YN")) \
or (self.what == "DELIM" and (myString in ";|,")) \
or (self.what == "1234" and (myString in "1234")) \
or (self.what == "CURR"):
if ((self.getLength() + len(myString)) <= self.limit):
super(JTextFieldLimitYN, self).insertString(myOffset, myString, myAttr) # noqa
def fix_delimiter( theDelimiter ):
try:
if sys.version_info.major >= 3: return theDelimiter
if sys.version_info.major < 2: return str(theDelimiter)
if sys.version_info.minor > 7: return theDelimiter
if sys.version_info.minor < 7: return str(theDelimiter)
if sys.version_info.micro >= 2: return theDelimiter
except:
pass
return str( theDelimiter )
def get_StuWareSoftSystems_parameters_from_file(myFile="StuWareSoftSystems.dict"):
global debug, myParameters, lPickle_version_warning, version_build, _resetParameters # noqa
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )
if _resetParameters:
myPrint("B", "User has specified to reset parameters... keeping defaults and skipping pickle()")
myParameters = {}
return
old_dict_filename = os.path.join("..", myFile)
# Pickle was originally encrypted, no need, migrating to unencrypted
migratedFilename = os.path.join(moneydance.getCurrentAccountBook().getRootFolder().getAbsolutePath(),myFile)
myPrint("DB", "Now checking for parameter file:", migratedFilename)
if os.path.exists( migratedFilename ):
myPrint("DB", "loading parameters from non-encrypted Pickle file:", migratedFilename)
myPrint("DB", "Parameter file", migratedFilename, "exists..")
# Open the file
try:
istr = FileInputStream(migratedFilename)
load_file = FileUtil.wrap(istr)
# noinspection PyTypeChecker
myParameters = pickle.load(load_file)
load_file.close()
except FileNotFoundException:
myPrint("B", "Error: failed to find parameter file...")
myParameters = None
except EOFError:
myPrint("B", "Error: reached EOF on parameter file....")
myParameters = None
except:
myPrint("B","Error opening Pickle File (will try encrypted version) - Unexpected error ", sys.exc_info()[0])
myPrint("B","Error opening Pickle File (will try encrypted version) - Unexpected error ", sys.exc_info()[1])
myPrint("B","Error opening Pickle File (will try encrypted version) - Line Number: ", sys.exc_info()[2].tb_lineno)
# OK, so perhaps from older version - encrypted, try to read
try:
local_storage = moneydance.getCurrentAccountBook().getLocalStorage()
istr = local_storage.openFileForReading(old_dict_filename)
load_file = FileUtil.wrap(istr)
# noinspection PyTypeChecker
myParameters = pickle.load(load_file)
load_file.close()
myPrint("B","Success loading Encrypted Pickle file - will migrate to non encrypted")
lPickle_version_warning = True
except:
myPrint("B","Opening Encrypted Pickle File - Unexpected error ", sys.exc_info()[0])
myPrint("B","Opening Encrypted Pickle File - Unexpected error ", sys.exc_info()[1])
myPrint("B","Error opening Pickle File - Line Number: ", sys.exc_info()[2].tb_lineno)
myPrint("B", "Error: Pickle.load() failed.... Is this a restored dataset? Will ignore saved parameters, and create a new file...")
myParameters = None
if myParameters is None:
myParameters = {}
myPrint("DB","Parameters did not load, will keep defaults..")
else:
myPrint("DB","Parameters successfully loaded from file...")
else:
myPrint("J", "Parameter Pickle file does not exist - will use default and create new file..")
myPrint("D", "Parameter Pickle file does not exist - will use default and create new file..")
myParameters = {}
if not myParameters: return
myPrint("DB","myParameters read from file contains...:")
for key in sorted(myParameters.keys()):
myPrint("DB","...variable:", key, myParameters[key])
if myParameters.get("debug") is not None: debug = myParameters.get("debug")
if myParameters.get("lUseMacFileChooser") is not None:
myPrint("B", "Detected old lUseMacFileChooser parameter/variable... Will delete it...")
myParameters.pop("lUseMacFileChooser", None) # Old variable - not used - delete from parameter file
myPrint("DB","Parameter file loaded if present and myParameters{} dictionary set.....")
# Now load into memory!
load_StuWareSoftSystems_parameters_into_memory()
return
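# Counterpart to the loader above: merges this script's current settings back into myParameters{} (preserving any other
# StuWareSoftSystems scripts' keys) and pickles the whole dictionary, unencrypted, back into the dataset's folder.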
def save_StuWareSoftSystems_parameters_to_file(myFile="StuWareSoftSystems.dict"):
global debug, myParameters, lPickle_version_warning, version_build
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )
if myParameters is None: myParameters = {}
# Don't forget, any parameters loaded earlier will be preserved; just add changed variables....
myParameters["__Author"] = "Stuart Beesley - (c) StuWareSoftSystems"
myParameters["debug"] = debug
dump_StuWareSoftSystems_parameters_from_memory()
# Pickle was originally encrypted, no need, migrating to unencrypted
migratedFilename = os.path.join(moneydance.getCurrentAccountBook().getRootFolder().getAbsolutePath(),myFile)
myPrint("DB","Will try to save parameter file:", migratedFilename)
ostr = FileOutputStream(migratedFilename)
myPrint("DB", "about to Pickle.dump and save parameters to unencrypted file:", migratedFilename)
try:
save_file = FileUtil.wrap(ostr)
# noinspection PyTypeChecker
pickle.dump(myParameters, save_file)
save_file.close()
myPrint("DB","myParameters now contains...:")
for key in sorted(myParameters.keys()):
myPrint("DB","...variable:", key, myParameters[key])
except:
myPrint("B", "Error - failed to create/write parameter file.. Ignoring and continuing.....")
dump_sys_error_to_md_console_and_errorlog()
return
myPrint("DB","Parameter file written and parameters saved to disk.....")
return
def get_time_stamp_as_nice_text( timeStamp ):
prettyDate = ""
try:
c = Calendar.getInstance()
c.setTime(Date(timeStamp))
dateFormatter = SimpleDateFormat("yyyy/MM/dd HH:mm:ss(.SSS) Z z zzzz")
prettyDate = dateFormatter.format(c.getTime())
except:
pass
return prettyDate
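# e.g. get_time_stamp_as_nice_text(System.currentTimeMillis()) gives something like
# "2021/01/31 14:05:59(.123) +0000 GMT Greenwich Mean Time" (the exact zone text depends on your locale/timezone).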
def currentDateTimeMarker():
c = Calendar.getInstance()
dateformat = SimpleDateFormat("_yyyyMMdd_HHmmss")
_datetime = dateformat.format(c.getTime())
return _datetime
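# e.g. currentDateTimeMarker() gives something like "_20210131_140559" - handy as a filename suffix.
# destroyOldFrames() below disposes of any JFrames left behind by a previous run of this module (matched on frame name),
# which in turn fires windowClosed() so any old Moneydance listeners get removed too.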
def destroyOldFrames(moduleName):
frames = JFrame.getFrames()
for fr in frames:
if fr.getName().lower().startswith(moduleName):
myPrint("DB","Found old frame %s and active status is: %s" %(fr.getName(),fr.isActiveInMoneydance))
# if fr.isActiveInMoneydance:
try:
fr.isActiveInMoneydance = False
fr.setVisible(False)
fr.dispose() # This should call windowClosed() which should remove MD listeners.....
myPrint("DB","disposed of old frame: %s" %(fr.getName()))
except:
myPrint("B","Failed to dispose old frame: %s" %(fr.getName()))
dump_sys_error_to_md_console_and_errorlog()
def classPrinter(className, theObject):
try:
text = "Class: %s %s@{:x}".format(System.identityHashCode(theObject)) %(className, theObject.__class__)
except:
text = "Error in classPrinter(): %s: %s" %(className, theObject)
return text
# END COMMON DEFINITIONS ###############################################################################################
# END COMMON DEFINITIONS ###############################################################################################
# END COMMON DEFINITIONS ###############################################################################################
# >>> CUSTOMISE & DO THIS FOR EACH SCRIPT
# >>> CUSTOMISE & DO THIS FOR EACH SCRIPT
# >>> CUSTOMISE & DO THIS FOR EACH SCRIPT
def load_StuWareSoftSystems_parameters_into_memory():
global debug, myParameters, lPickle_version_warning, version_build
# >>> THESE ARE THIS SCRIPT's PARAMETERS TO LOAD
global __list_future_reminders, lStripASCII, csvDelimiter, scriptpath, userdateformat, _column_widths_LFR
global lWriteBOMToExportFile_SWSS, daysToLookForward_LFR # noqa
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )
myPrint("DB", "Loading variables into memory...")
if myParameters is None: myParameters = {}
if myParameters.get("__list_future_reminders") is not None:
__list_future_reminders = myParameters.get("__list_future_reminders")
if myParameters.get("userdateformat") is not None: userdateformat = myParameters.get("userdateformat")
if myParameters.get("lStripASCII") is not None: lStripASCII = myParameters.get("lStripASCII")
if myParameters.get("csvDelimiter") is not None: csvDelimiter = myParameters.get("csvDelimiter")
if myParameters.get("_column_widths_LFR") is not None: _column_widths_LFR = myParameters.get("_column_widths_LFR")
if myParameters.get("daysToLookForward_LFR") is not None: daysToLookForward_LFR = myParameters.get("daysToLookForward_LFR")
if myParameters.get("lWriteBOMToExportFile_SWSS") is not None: lWriteBOMToExportFile_SWSS = myParameters.get("lWriteBOMToExportFile_SWSS") # noqa
if myParameters.get("scriptpath") is not None:
scriptpath = myParameters.get("scriptpath")
if not os.path.isdir(scriptpath):
myPrint("B", "Warning: loaded parameter scriptpath does not appear to be a valid directory:", scriptpath, "will ignore")
scriptpath = ""
myPrint("DB","myParameters{} set into memory (as variables).....")
return
# >>> CUSTOMISE & DO THIS FOR EACH SCRIPT
def dump_StuWareSoftSystems_parameters_from_memory():
global debug, myParameters, lPickle_version_warning, version_build
global lWriteBOMToExportFile_SWSS # noqa
# >>> THESE ARE THIS SCRIPT's PARAMETERS TO SAVE
global __list_future_reminders, lStripASCII, csvDelimiter, scriptpath, lDisplayOnly, userdateformat, _column_widths_LFR, daysToLookForward_LFR
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()" )
# NOTE: Parameters were loaded earlier on... Preserve existing, and update any used ones...
# (i.e. other StuWareSoftSystems programs might be sharing the same file)
if myParameters is None: myParameters = {}
myParameters["__list_future_reminders"] = version_build
myParameters["userdateformat"] = userdateformat
myParameters["lStripASCII"] = lStripASCII
myParameters["csvDelimiter"] = csvDelimiter
myParameters["_column_widths_LFR"] = _column_widths_LFR
myParameters["daysToLookForward_LFR"] = daysToLookForward_LFR
myParameters["lWriteBOMToExportFile_SWSS"] = lWriteBOMToExportFile_SWSS
if not lDisplayOnly and scriptpath != "" and os.path.isdir(scriptpath):
myParameters["scriptpath"] = scriptpath
myPrint("DB","variables dumped from memory back into myParameters{}.....")
return
get_StuWareSoftSystems_parameters_from_file()
myPrint("DB", "DEBUG IS ON..")
# clear up any old left-overs....
destroyOldFrames(myModuleID)
# END ALL CODE COPY HERE ###############################################################################################
# END ALL CODE COPY HERE ###############################################################################################
# END ALL CODE COPY HERE ###############################################################################################
moneydance_ui.firstMainFrame.setStatus(">> StuWareSoftSystems - %s launching......." %(myScriptName),0)
# Create fake JFrame() so that all popups have correct Moneydance Icons etc
JFrame.setDefaultLookAndFeelDecorated(True)
list_future_reminders_frame_ = MyJFrame()
list_future_reminders_frame_.setName(u"%s_fake" %(myModuleID))
if (not Platform.isMac()):
moneydance_ui.getImages()
list_future_reminders_frame_.setIconImage(MDImages.getImage(moneydance_ui.getMain().getSourceInformation().getIconResource()))
list_future_reminders_frame_.setVisible(False)
list_future_reminders_frame_.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE)
class CloseAboutAction(AbstractAction):
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def __init__(self, theFrame):
self.theFrame = theFrame
def actionPerformed(self, event):
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "Event:", event)
self.theFrame.dispose()
def about_this_script():
global debug, scriptExit
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
# noinspection PyUnresolvedReferences
about_d = JDialog(list_future_reminders_frame_, "About", Dialog.ModalityType.MODELESS)
shortcut = Toolkit.getDefaultToolkit().getMenuShortcutKeyMaskEx()
about_d.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_W, shortcut), "close-window")
about_d.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_F4, shortcut), "close-window")
about_d.getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW).put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), "close-window")
about_d.getRootPane().getActionMap().put("close-window", CloseAboutAction(about_d))
about_d.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE) # The CloseAction() and WindowListener() will handle dispose() - else change back to DISPOSE_ON_CLOSE
if (not Platform.isMac()):
# moneydance_ui.getImages()
about_d.setIconImage(MDImages.getImage(moneydance_ui.getMain().getSourceInformation().getIconResource()))
aboutPanel=JPanel()
aboutPanel.setLayout(FlowLayout(FlowLayout.LEFT))
aboutPanel.setPreferredSize(Dimension(1120, 500))
_label1 = JLabel(pad("Author: Stuart Beesley", 800))
_label1.setForeground(Color.BLUE)
aboutPanel.add(_label1)
_label2 = JLabel(pad("StuWareSoftSystems (2020)", 800))
_label2.setForeground(Color.BLUE)
aboutPanel.add(_label2)
displayString=scriptExit
displayJText = JTextArea(displayString)
displayJText.setFont( getMonoFont() )
displayJText.setEditable(False)
# displayJText.setCaretPosition(0)
displayJText.setLineWrap(False)
displayJText.setWrapStyleWord(False)
displayJText.setMargin(Insets(8, 8, 8, 8))
# displayJText.setBackground((mdGUI.getColors()).defaultBackground)
# displayJText.setForeground((mdGUI.getColors()).defaultTextForeground)
aboutPanel.add(displayJText)
about_d.add(aboutPanel)
about_d.pack()
about_d.setLocationRelativeTo(None)
about_d.setVisible(True)
return
class DoTheMenu(AbstractAction):
def __init__(self, menu):
self.menu = menu
def actionPerformed(self, event): # noqa
global list_future_reminders_frame_, debug
global _column_widths_LFR, daysToLookForward_LFR, saveStatusLabel
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "Event: ", event )
# ##########################################################################################################
if event.getActionCommand().lower().startswith("change look"):
days = myPopupAskForInput(list_future_reminders_frame_,
"LOOK FORWARD",
"DAYS:",
"Enter the number of days to look forward",
defaultValue=str(daysToLookForward_LFR))
if days is None or days == "" or not StringUtils.isInteger(days) or int(days) < 1 or int(days) > 365:
myPopupInformationBox(list_future_reminders_frame_,"ERROR - Days must be between 1-365 - no changes made....",theMessageType=JOptionPane.WARNING_MESSAGE)
else:
daysToLookForward_LFR = int(days)
myPrint("B","Days to look forward changed to %s" %(daysToLookForward_LFR))
formatDate = DateUtil.incrementDate(DateUtil.getStrippedDateInt(),0,0,daysToLookForward_LFR)
formatDate = str(formatDate/10000).zfill(4) + "-" + str((formatDate/100)%100).zfill(2) + "-" + str(formatDate%100).zfill(2)
saveStatusLabel.setText("** Looking forward %s days to %s **" %(daysToLookForward_LFR, formatDate))
RefreshMenuAction().refresh()
# ##########################################################################################################
if event.getActionCommand().lower().startswith("debug"):
debug = not debug
myPrint("B","DEBUG is now set to: %s" %(debug))
# ##########################################################################################################
if event.getActionCommand().lower().startswith("reset"):
_column_widths_LFR = []
RefreshMenuAction().refresh()
# ##########################################################################################################
if event.getActionCommand().lower().startswith("refresh"):
RefreshMenuAction().refresh()
# ##########################################################################################################
if event.getActionCommand().lower().startswith("extract") or event.getActionCommand().lower().startswith("close"):
ExtractMenuAction().extract_or_close()
# ##########################################################################################################
if event.getActionCommand() == "About":
about_this_script()
# Save parameters now...
if (event.getActionCommand().lower().startswith("change look")
or event.getActionCommand().lower().startswith("debug")
or event.getActionCommand().lower().startswith("reset")
or event.getActionCommand().lower().startswith("extract")
or event.getActionCommand().lower().startswith("close")):
try:
save_StuWareSoftSystems_parameters_to_file()
except:
myPrint("B", "Error - failed to save parameters to pickle file...!")
dump_sys_error_to_md_console_and_errorlog()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
def terminate_script():
global debug, list_future_reminders_frame_, lDisplayOnly, lGlobalErrorDetected
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
# We now do this at the beginning, not the end....
# try:
# save_StuWareSoftSystems_parameters_to_file()
# except:
# myPrint("B", "Error - failed to save parameters to pickle file...!")
# dump_sys_error_to_md_console_and_errorlog()
moneydance_ui.firstMainFrame.setStatus(">> StuWareSoftSystems - thanks for using >> %s......." %(myScriptName),0)
if not i_am_an_extension_so_run_headless: print(scriptExit)
try:
list_future_reminders_frame_.dispose()
except:
myPrint("B","Error. Final dispose failed....?")
dump_sys_error_to_md_console_and_errorlog()
# Cleanup any mess and left-overs
destroyOldFrames(myModuleID)
myPrint("B","Script/extension is terminating.....")
return
csvfilename = None
if decimalCharSep != "." and csvDelimiter == ",": csvDelimiter = ";" # Override for EU countries or where decimal point is actually a comma...
myPrint("DB", "Decimal point:", decimalCharSep, "Grouping Separator", groupingCharSep, "CSV Delimiter set to:", csvDelimiter)
sdf = SimpleDateFormat("dd/MM/yyyy")
dateStrings=["dd/mm/yyyy", "mm/dd/yyyy", "yyyy/mm/dd", "yyyymmdd"]
# 1=dd/mm/yyyy, 2=mm/dd/yyyy, 3=yyyy/mm/dd, 4=yyyymmdd
label1 = JLabel("Select Output Date Format (default yyyy/mm/dd):")
user_dateformat = JComboBox(dateStrings)
if userdateformat == "%d/%m/%Y": user_dateformat.setSelectedItem("dd/mm/yyyy")
elif userdateformat == "%m/%d/%Y": user_dateformat.setSelectedItem("mm/dd/yyyy")
elif userdateformat == "%Y%m%d": user_dateformat.setSelectedItem("yyyymmdd")
else: user_dateformat.setSelectedItem("yyyy/mm/dd")
labelRC = JLabel("Reset Column Widths to Defaults?")
user_selectResetColumns = JCheckBox("", False)
label2 = JLabel("Strip non ASCII characters from CSV export?")
user_selectStripASCII = JCheckBox("", lStripASCII)
delimStrings = [";","|",","]
label3 = JLabel("Change CSV Export Delimiter from default to: ';|,'")
user_selectDELIMITER = JComboBox(delimStrings)
user_selectDELIMITER.setSelectedItem(csvDelimiter)
labelBOM = JLabel("Write BOM (Byte Order Mark) to file (helps Excel open files)?")
user_selectBOM = JCheckBox("", lWriteBOMToExportFile_SWSS)
label4 = JLabel("Turn DEBUG Verbose messages on?")
user_selectDEBUG = JCheckBox("", debug)
userFilters = JPanel(GridLayout(0, 2))
userFilters.add(label1)
userFilters.add(user_dateformat)
userFilters.add(labelRC)
userFilters.add(user_selectResetColumns)
userFilters.add(label2)
userFilters.add(user_selectStripASCII)
userFilters.add(label3)
userFilters.add(user_selectDELIMITER)
userFilters.add(labelBOM)
userFilters.add(user_selectBOM)
userFilters.add(label4)
userFilters.add(user_selectDEBUG)
lExit = False
# lDisplayOnly = False
lDisplayOnly = True
# options = ["Abort", "Display & CSV Export", "Display Only"]
# userAction = (JOptionPane.showOptionDialog(list_future_reminders_frame_,
# userFilters,
# "%s(build: %s) Set Script Parameters...." % (myScriptName, version_build),
# JOptionPane.OK_CANCEL_OPTION,
# JOptionPane.QUESTION_MESSAGE,
# moneydance_ui.getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"),
# options,
# options[2])
# )
# if userAction == 1: # Display & Export
# myPrint("DB", "Display and export chosen")
# lDisplayOnly = False
# elif userAction == 2: # Display Only
# lDisplayOnly = True
# myPrint("DB", "Display only with no export chosen")
# else:
# # Abort
# myPrint("DB", "User Cancelled Parameter selection.. Will abort..")
# myPopupInformationBox(list_future_reminders_frame_, "User Cancelled Parameter selection.. Will abort..", "PARAMETERS")
# lDisplayOnly = False
# lExit = True
if not lExit:
debug = user_selectDEBUG.isSelected()
myPrint("DB", "DEBUG turned on")
if debug:
myPrint("DB","Parameters Captured",
"User Date Format:", user_dateformat.getSelectedItem(),
"Reset Columns", user_selectResetColumns.isSelected(),
"Strip ASCII:", user_selectStripASCII.isSelected(),
"Write BOM to file:", user_selectBOM.isSelected(),
"Verbose Debug Messages: ", user_selectDEBUG.isSelected(),
"CSV File Delimiter:", user_selectDELIMITER.getSelectedItem())
# endif
if user_dateformat.getSelectedItem() == "dd/mm/yyyy": userdateformat = "%d/%m/%Y"
elif user_dateformat.getSelectedItem() == "mm/dd/yyyy": userdateformat = "%m/%d/%Y"
elif user_dateformat.getSelectedItem() == "yyyy/mm/dd": userdateformat = "%Y/%m/%d"
elif user_dateformat.getSelectedItem() == "yyyymmdd": userdateformat = "%Y%m%d"
else:
# PROBLEM / default
userdateformat = "%Y/%m/%d"
if user_selectResetColumns.isSelected():
myPrint("B","User asked to reset columns.... Resetting Now....")
_column_widths_LFR=[] # This will invalidate them
lStripASCII = user_selectStripASCII.isSelected()
csvDelimiter = user_selectDELIMITER.getSelectedItem()
if csvDelimiter == "" or (not (csvDelimiter in ";|,")):
myPrint("B", "Invalid Delimiter:", csvDelimiter, "selected. Overriding with:','")
csvDelimiter = ","
if decimalCharSep == csvDelimiter:
myPrint("B", "WARNING: The CSV file delimiter:", csvDelimiter, "cannot be the same as your decimal point character:", decimalCharSep, " - Proceeding without file export!!")
lDisplayOnly = True
myPopupInformationBox(None, "ERROR - The CSV file delimiter: %s ""cannot be the same as your decimal point character: %s. "
"Proceeding without file export (i.e. I will do nothing)!!" %(csvDelimiter, decimalCharSep),
"INVALID FILE DELIMITER", theMessageType=JOptionPane.ERROR_MESSAGE)
lWriteBOMToExportFile_SWSS = user_selectBOM.isSelected()
myPrint("B", "User Parameters...")
myPrint("B", "user date format....:", userdateformat)
# Now get the export filename
csvfilename = None
if not lDisplayOnly: # i.e. we have asked for a file export - so get the filename
if lStripASCII:
myPrint("DB", "Will strip non-ASCII characters - e.g. Currency symbols from output file...", " Using Delimiter:", csvDelimiter)
else:
myPrint("DB", "Non-ASCII characters will not be stripped from file: ", " Using Delimiter:", csvDelimiter)
if lWriteBOMToExportFile_SWSS:
myPrint("B", "Script will add a BOM (Byte Order Mark) to front of the extracted file...")
else:
myPrint("B", "No BOM (Byte Order Mark) will be added to the extracted file...")
def grabTheFile():
global debug, lDisplayOnly, csvfilename, lIamAMac, scriptpath, myScriptName
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
if scriptpath == "" or scriptpath is None: # No parameter saved / loaded from disk
scriptpath = myDir()
myPrint("DB", "Default file export output path is....:", scriptpath)
csvfilename = ""
if lIamAMac:
myPrint("D", "MacOS X detected: Therefore I will run FileDialog with no extension filters to get filename....")
# jFileChooser hangs on Mac when using file extension filters, also looks rubbish. So using Mac(ish)GUI
System.setProperty("com.apple.macos.use-file-dialog-packages","true") # In theory prevents access to app file structure (but doesnt seem to work)
System.setProperty("apple.awt.fileDialogForDirectories", "false")
filename = FileDialog(list_future_reminders_frame_, "Select/Create CSV file for extract (CANCEL=NO EXPORT)")
filename.setMultipleMode(False)
filename.setMode(FileDialog.SAVE)
filename.setFile(extract_filename)
if (scriptpath is not None and scriptpath != ""): filename.setDirectory(scriptpath)
# Copied from MD code... File filters only work on non Macs (or Macs below certain versions)
if (not Platform.isOSX() or not Platform.isOSXVersionAtLeast("10.13")):
extfilter = ExtFilenameFilter("csv")
filename.setFilenameFilter(extfilter) # I'm not actually sure this works...?
filename.setVisible(True)
csvfilename = filename.getFile()
if (csvfilename is None) or csvfilename == "":
lDisplayOnly = True
csvfilename = None
myPrint("B", "User chose to cancel or no file selected >> So no Extract will be performed... ")
myPopupInformationBox(list_future_reminders_frame_, "User chose to cancel or no file selected >> So no Extract will be performed... ", "FILE SELECTION")
elif str(csvfilename).endswith(".moneydance"):
myPrint("B", "User selected file:", csvfilename)
myPrint("B", "Sorry - User chose to use .moneydance extension - I will not allow it!... So no Extract will be performed...")
myPopupInformationBox(list_future_reminders_frame_, "Sorry - User chose to use .moneydance extension - I will not allow it!... So no Extract will be performed...", "FILE SELECTION")
lDisplayOnly = True
csvfilename = None
elif ".moneydance" in filename.getDirectory():
myPrint("B", "User selected file:", filename.getDirectory(), csvfilename)
myPrint("B", "Sorry - FileDialog() User chose to save file in .moneydance location. NOT Good practice so I will not allow it!... So no Extract will be performed...")
myPopupInformationBox(list_future_reminders_frame_, "Sorry - FileDialog() User chose to save file in .moneydance location. NOT Good practice so I will not allow it!... So no Extract will be performed...", "FILE SELECTION")
lDisplayOnly = True
csvfilename = None
else:
csvfilename = os.path.join(filename.getDirectory(), filename.getFile())
scriptpath = str(filename.getDirectory())
if not lDisplayOnly:
if os.path.exists(csvfilename) and os.path.isfile(csvfilename):
myPrint("DB", "WARNING: file exists,but assuming user said OK to overwrite..")
if not lDisplayOnly:
if check_file_writable(csvfilename):
if lStripASCII:
myPrint("B", 'Will display Reminders and then extract to file: ', csvfilename, "(NOTE: Should drop non utf8 characters...)")
else:
myPrint("B", 'Will display Reminders and then extract to file: ', csvfilename, "...")
scriptpath = os.path.dirname(csvfilename)
else:
myPrint("B", "Sorry - I just checked and you do not have permissions to create this file:", csvfilename)
myPopupInformationBox(list_future_reminders_frame_, "Sorry - I just checked and you do not have permissions to create this file: %s" % csvfilename, "FILE SELECTION")
csvfilename=""
lDisplayOnly = True
return
# enddef
if not lDisplayOnly: grabTheFile()
else:
pass
# endif
if csvfilename is None:
lDisplayOnly = True
myPrint("B","No Export will be performed")
# save here instead of at the end.
save_StuWareSoftSystems_parameters_to_file()
# Moneydance dates are int yyyymmddd - convert to locale date string for CSV format
def dateoutput(dateinput, theformat):
if dateinput == "EXPIRED": _dateoutput = dateinput
elif dateinput == "": _dateoutput = ""
elif dateinput == 0: _dateoutput = ""
elif dateinput == "0": _dateoutput = ""
else:
dateasdate = datetime.datetime.strptime(str(dateinput), "%Y%m%d") # Convert to Date field
_dateoutput = dateasdate.strftime(theformat)
return _dateoutput
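# e.g. dateoutput(20201231, "%d/%m/%Y") returns "31/12/2020"; "EXPIRED" is passed straight through, and "", 0 or "0" return "".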
def myGetNextOccurance(theRem, startDate, maximumDate):
cal = Calendar.getInstance()
ackPlusOne = theRem.getDateAcknowledgedInt()
if ackPlusOne > 0:
ackPlusOne = DateUtil.incrementDate(ackPlusOne, 0, 0, 1)
DateUtil.setCalendarDate(cal, Math.max(startDate, ackPlusOne))
while True:
intDate = DateUtil.convertCalToInt(cal)
if (intDate > maximumDate or (theRem.getLastDateInt() > 0 and intDate > theRem.getLastDateInt())): # noqa
return 0
if (theRem.occursOnDate(cal)):
return DateUtil.convertCalToInt(cal)
cal.add(Calendar.DAY_OF_MONTH, 1)
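# myGetNextOccurance() walks the calendar a day at a time, starting from the later of startDate and the day after the
# reminder was last acknowledged, and returns the first date (as an int yyyymmdd) on which the reminder occurs - or 0 if
# nothing falls on or before maximumDate / the reminder's own end date.
#
# build_the_data_file() below re-reads all reminders, repeatedly calling the routine above to list every occurrence within
# the look-forward window, builds one csv line per occurrence (NOTE reminders get a blank amount), and then hands the
# result to ReminderTable() to (re)build the on-screen table. ind=0 means first build, ind=1 means refresh.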
def build_the_data_file(ind):
global sdf, userdateformat, csvlines, csvheaderline, myScriptName, baseCurrency, headerFormats
global debug, ExtractDetails_Count, daysToLookForward_LFR
    # Override the user's chosen date format here: the table sorts on strings, so formats like dd/mm/yyyy would sort incorrectly - fix properly later
overridedateformat = "%Y/%m/%d"
ExtractDetails_Count += 1
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", ind, " - On iteration/call: ", ExtractDetails_Count)
# ind == 1 means that this is a repeat call, so the table should be refreshed
root = moneydance.getCurrentAccountBook()
baseCurrency = moneydance_data.getCurrencies().getBaseType()
rems = root.getReminders().getAllReminders()
if rems.size() < 1:
return False
myPrint("B", 'Success: read ', rems.size(), 'reminders')
print
csvheaderline = [
"Number#",
"NextDue",
# "ReminderType",
# "Frequency",
# "AutoCommitDays",
# "LastAcknowledged",
# "FirstDate",
# "EndDate",
"ReminderDescription",
"NetAmount"
# "TxfrType",
# "Account",
# "MainDescription",
# "Split#",
# "SplitAmount",
# "Category",
# "Description",
# "Memo"
]
headerFormats = [
[Number,JLabel.CENTER],
[String,JLabel.CENTER],
# [String,JLabel.LEFT],
# [String,JLabel.LEFT],
# [String,JLabel.LEFT],
# [String,JLabel.CENTER],
# [String,JLabel.CENTER],
# [String,JLabel.CENTER],
[String,JLabel.LEFT],
[Number,JLabel.RIGHT]
# [String,JLabel.LEFT],
# [String,JLabel.LEFT],
# [String,JLabel.LEFT],
# [String,JLabel.CENTER],
# [Number,JLabel.RIGHT],
# [String,JLabel.LEFT],
# [String,JLabel.LEFT],
# [String,JLabel.LEFT]
]
# Read each reminder and create a csv line for each in the csvlines array
csvlines = [] # Set up an empty array
for index in range(0, int(rems.size())):
rem = rems[index] # Get the reminder
remtype = rem.getReminderType() # NOTE or TRANSACTION
desc = rem.getDescription().replace(",", " ") # remove commas to keep csv format happy
# memo = str(rem.getMemo()).replace(",", " ").strip() # remove commas to keep csv format happy
# memo = str(memo).replace("\n", "*").strip() # remove newlines to keep csv format happy
myPrint("P", "Reminder: ", index + 1, rem.getDescription()) # Name of Reminder
# determine the frequency of the transaction
daily = rem.getRepeatDaily()
weekly = rem.getRepeatWeeklyModifier()
monthly = rem.getRepeatMonthlyModifier()
yearly = rem.getRepeatYearly()
countfreqs = 0
remfreq = ''
if daily > 0:
remfreq += 'DAILY'
remfreq += '(every ' + str(daily) + ' days)'
countfreqs += 1
if len(rem.getRepeatWeeklyDays()) > 0 and rem.getRepeatWeeklyDays()[0] > 0:
for freq in range(0, len(rem.getRepeatWeeklyDays())):
if len(remfreq) > 0: remfreq += " & "
if weekly == Reminder.WEEKLY_EVERY: remfreq += 'WEEKLY_EVERY'
if weekly == Reminder.WEEKLY_EVERY_FIFTH: remfreq += 'WEEKLY_EVERY_FIFTH'
if weekly == Reminder.WEEKLY_EVERY_FIRST: remfreq += 'WEEKLY_EVERY_FIRST'
if weekly == Reminder.WEEKLY_EVERY_FOURTH: remfreq += 'WEEKLY_EVERY_FOURTH'
if weekly == Reminder.WEEKLY_EVERY_LAST: remfreq += 'WEEKLY_EVERY_LAST'
if weekly == Reminder.WEEKLY_EVERY_SECOND: remfreq += 'WEEKLY_EVERY_SECOND'
if weekly == Reminder.WEEKLY_EVERY_THIRD: remfreq += 'WEEKLY_EVERY_THIRD'
if rem.getRepeatWeeklyDays()[freq] == 1: remfreq += '(on Sunday)'
if rem.getRepeatWeeklyDays()[freq] == 2: remfreq += '(on Monday)'
if rem.getRepeatWeeklyDays()[freq] == 3: remfreq += '(on Tuesday)'
if rem.getRepeatWeeklyDays()[freq] == 4: remfreq += '(on Wednesday)'
if rem.getRepeatWeeklyDays()[freq] == 5: remfreq += '(on Thursday)'
if rem.getRepeatWeeklyDays()[freq] == 6: remfreq += '(on Friday)'
if rem.getRepeatWeeklyDays()[freq] == 7: remfreq += '(on Saturday)'
if rem.getRepeatWeeklyDays()[freq] < 1 or rem.getRepeatWeeklyDays()[
freq] > 7: remfreq += '(*ERROR*)'
countfreqs += 1
if len(rem.getRepeatMonthly()) > 0 and rem.getRepeatMonthly()[0] > 0:
for freq in range(0, len(rem.getRepeatMonthly())):
if len(remfreq) > 0: remfreq += " & "
if monthly == Reminder.MONTHLY_EVERY: remfreq += 'MONTHLY_EVERY'
if monthly == Reminder.MONTHLY_EVERY_FOURTH: remfreq += 'MONTHLY_EVERY_FOURTH'
if monthly == Reminder.MONTHLY_EVERY_OTHER: remfreq += 'MONTHLY_EVERY_OTHER'
if monthly == Reminder.MONTHLY_EVERY_SIXTH: remfreq += 'MONTHLY_EVERY_SIXTH'
if monthly == Reminder.MONTHLY_EVERY_THIRD: remfreq += 'MONTHLY_EVERY_THIRD'
theday = rem.getRepeatMonthly()[freq]
if theday == Reminder.LAST_DAY_OF_MONTH:
remfreq += '(on LAST_DAY_OF_MONTH)'
else:
if 4 <= theday <= 20 or 24 <= theday <= 30: suffix = "th"
else: suffix = ["st", "nd", "rd"][theday % 10 - 1]
remfreq += '(on ' + str(theday) + suffix + ')'
countfreqs += 1
if yearly:
if len(remfreq) > 0: remfreq += " & "
remfreq += 'YEARLY'
countfreqs += 1
if len(remfreq) < 1 or countfreqs == 0: remfreq = '!ERROR! NO ACTUAL FREQUENCY OPTIONS SET PROPERLY ' + remfreq
if countfreqs > 1: remfreq = "**MULTI** " + remfreq # noqa
todayInt = DateUtil.getStrippedDateInt()
lastdate = rem.getLastDateInt()
if lastdate < 1: # Detect if an enddate is set
stopDate = min(DateUtil.incrementDate(todayInt, 0, 0, daysToLookForward_LFR),20991231)
nextDate = rem.getNextOccurance(stopDate) # Use cutoff far into the future
else:
stopDate = min(DateUtil.incrementDate(todayInt, 0, 0, daysToLookForward_LFR),lastdate)
nextDate = rem.getNextOccurance(stopDate) # Stop at enddate
if nextDate < 1:
continue
# nextDate = DateUtil.incrementDate(nextDate, 0, 0, -1)
loopDetector=0
while True:
loopDetector+=1
if loopDetector > 10000:
myPrint("B","Loop detected..? Breaking out.... Reminder %s" %(rem))
myPopupInformationBox(list_future_reminders_frame_,"ERROR - Loop detected..?! Will exit (review console log)",theMessageType=JOptionPane.ERROR_MESSAGE)
raise Exception("Loop detected..? Aborting.... Reminder %s" %(rem))
calcNext = myGetNextOccurance(rem,nextDate, stopDate)
if calcNext < 1:
break
remdate = str(calcNext)
# nextDate = DateUtil.incrementDate(calcNext, 0, 0, 1)
nextDate = DateUtil.incrementDate(calcNext, 0, 0, 1)
lastack = rem.getDateAcknowledgedInt()
if lastack == 0 or lastack == 19700101: lastack = '' # noqa
auto = rem.getAutoCommitDays()
if auto >= 0: auto = 'YES: (' + str(auto) + ' days before scheduled)' # noqa
else: auto = 'NO' # noqa
if str(remtype) == 'NOTE':
csvline = []
csvline.append(index + 1)
csvline.append(dateoutput(remdate, overridedateformat))
# csvline.append(str(rem.getReminderType()))
# csvline.append(remfreq)
# csvline.append(auto)
# csvline.append(dateoutput(lastack, overridedateformat))
# csvline.append(dateoutput(rem.getInitialDateInt(), overridedateformat))
# csvline.append(dateoutput(lastdate, overridedateformat))
csvline.append(desc)
csvline.append('') # NetAmount
# csvline.append('') # TxfrType
# csvline.append('') # Account
# csvline.append('') # MainDescription
# csvline.append(str(index + 1) + '.0') # Split#
# csvline.append('') # SplitAmount
# csvline.append('') # Category
# csvline.append('') # Description
# csvline.append('"' + memo + '"') # Memo
csvlines.append(csvline)
elif str(remtype) == 'TRANSACTION':
txnparent = rem.getTransaction()
amount = baseCurrency.getDoubleValue(txnparent.getValue())
# for index2 in range(0, int(txnparent.getOtherTxnCount())):
for index2 in [0]:
# splitdesc = txnparent.getOtherTxn(index2).getDescription().replace(","," ") # remove commas to keep csv format happy
# splitmemo = txnparent.getMemo().replace(",", " ") # remove commas to keep csv format happy
# maindesc = txnparent.getDescription().replace(",", " ").strip()
if index2 > 0: amount = '' # Don't repeat the new amount on subsequent split lines (so you can total column). The split amount will be correct
# stripacct = str(txnparent.getAccount()).replace(",",
# " ").strip() # remove commas to keep csv format happy
# stripcat = str(txnparent.getOtherTxn(index2).getAccount()).replace(","," ").strip() # remove commas to keep csv format happy
csvline = []
csvline.append(index + 1)
csvline.append(dateoutput(remdate, overridedateformat))
# csvline.append(str(rem.getReminderType()))
# csvline.append(remfreq)
# csvline.append(auto)
# csvline.append(dateoutput(lastack, overridedateformat))
# csvline.append(dateoutput(rem.getInitialDateInt(), overridedateformat))
# csvline.append(dateoutput(lastdate, overridedateformat))
csvline.append(desc)
csvline.append((amount))
# csvline.append(txnparent.getTransferType())
# csvline.append(stripacct)
# csvline.append(maindesc)
# csvline.append(str(index + 1) + '.' + str(index2 + 1))
# csvline.append(baseCurrency.getDoubleValue(txnparent.getOtherTxn(index2).getValue()) * -1)
# csvline.append(stripcat)
# csvline.append(splitdesc)
# csvline.append(splitmemo)
csvlines.append(csvline)
index += 1
# if len(csvlines) < 1:
# return False
#
ReminderTable(csvlines, ind)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name)
ExtractDetails_Count -= 1
return True
# ENDDEF
# Listens for JTable column-width changes and saves the new widths into _column_widths_LFR so they can be persisted and restored next run
class ColumnChangeListener(TableColumnModelListener):
sourceTable = None
targetTable = None
def __init__(self, source):
self.sourceTable = source
def columnAdded(self, e): pass
def columnSelectionChanged(self, e): pass
def columnRemoved(self, e): pass
def columnMoved(self, e): pass
# noinspection PyUnusedLocal
def columnMarginChanged(self, e):
global _column_widths_LFR
sourceModel = self.sourceTable.getColumnModel()
for _i in range(0, sourceModel.getColumnCount()):
# Saving for later... Yummy!!
_column_widths_LFR[_i] = sourceModel.getColumn(_i).getWidth()
myPrint("D","Saving column %s as width %s for later..." %(_i,_column_widths_LFR[_i]))
# The javax.swing package and its subpackages provide a fairly comprehensive set of default renderer implementations,
# suitable for customization via inheritance. A notable omission is the lack of a default renderer for a JTableHeader
# in the public API. The renderer used by default is a Sun proprietary class, sun.swing.table.DefaultTableCellHeaderRenderer,
# which cannot be extended.
# DefaultTableHeaderCellRenderer seeks to fill this void, by providing a rendering designed to be identical with that of
# the proprietary class, with one difference: the vertical alignment of the header text has been set to BOTTOM, to provide
# a better match between DefaultTableHeaderCellRenderer and other custom renderers.
# The name of the class has been chosen considering this to be a default renderer for the cells of a table header,
# and not the table cells of a header as implied by the proprietary class name
class DefaultTableHeaderCellRenderer(DefaultTableCellRenderer):
# /**
# * Constructs a <code>DefaultTableHeaderCellRenderer</code>.
# * <P>
# * The horizontal alignment and text position are set as appropriate to a
# * table header cell, and the opaque property is set to false.
# */
def __init__(self):
# super(DefaultTableHeaderCellRenderer, self).__init__()
self.setHorizontalAlignment(JLabel.CENTER) # This one changes the text alignment
self.setHorizontalTextPosition(JLabel.RIGHT) # This positions the text to the left/right of the sort icon
self.setVerticalAlignment(JLabel.BOTTOM)
self.setOpaque(True) # if this is false then it hides the background colour
# enddef
# /**
# * returns the default table header cell renderer.
# * <P>
# * If the column is sorted, the appropriate icon is retrieved from the
# * current Look and Feel, and a border appropriate to a table header cell
# * is applied.
# * <P>
    # * Subclasses may override this method to provide custom content or
# * formatting.
# *
# * @param table the <code>JTable</code>.
# * @param value the value to assign to the header cell
# * @param isSelected This parameter is ignored.
# * @param hasFocus This parameter is ignored.
# * @param row This parameter is ignored.
# * @param column the column of the header cell to render
# * @return the default table header cell renderer
# */
def getTableCellRendererComponent(self, table, value, isSelected, hasFocus, row, column): # noqa
# noinspection PyUnresolvedReferences
super(DefaultTableHeaderCellRenderer, self).getTableCellRendererComponent(table, value, isSelected,hasFocus, row, column)
# tableHeader = table.getTableHeader()
# if (tableHeader is not None): self.setForeground(tableHeader.getForeground())
align = table.getCellRenderer(0, column).getHorizontalAlignment()
self.setHorizontalAlignment(align)
if align == JLabel.RIGHT:
self.setHorizontalTextPosition(JLabel.RIGHT)
elif align == JLabel.LEFT:
self.setHorizontalTextPosition(JLabel.LEFT)
elif align == JLabel.CENTER:
self.setHorizontalTextPosition(JLabel.LEFT)
self.setIcon(self._getIcon(table, column))
self.setBorder(UIManager.getBorder("TableHeader.cellBorder"))
self.setForeground(Color.BLACK)
self.setBackground(Color.lightGray)
# self.setHorizontalAlignment(JLabel.CENTER)
return self
# enddef
# /**
# * Overloaded to return an icon suitable to the primary sorted column, or null if
# * the column is not the primary sort key.
# *
# * @param table the <code>JTable</code>.
# * @param column the column index.
# * @return the sort icon, or null if the column is unsorted.
# */
def _getIcon(self, table, column): # noqa
sortKey = self.getSortKey(table, column)
if (sortKey is not None and table.convertColumnIndexToView(sortKey.getColumn()) == column):
x = (sortKey.getSortOrder())
if x == SortOrder.ASCENDING: return UIManager.getIcon("Table.ascendingSortIcon")
elif x == SortOrder.DESCENDING: return UIManager.getIcon("Table.descendingSortIcon")
elif x == SortOrder.UNSORTED: return UIManager.getIcon("Table.naturalSortIcon")
return None
# enddef
# /**
# * returns the current sort key, or null if the column is unsorted.
# *
# * @param table the table
# * @param column the column index
# * @return the SortKey, or null if the column is unsorted
# */
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def getSortKey(self, table, column): # noqa
rowSorter = table.getRowSorter()
if (rowSorter is None): return None
sortedColumns = rowSorter.getSortKeys()
if (sortedColumns.size() > 0): return sortedColumns.get(0)
return None
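# Module level state shared between the GUI listeners below:
#   focus                - tracks whether the main window currently has focus ("initial"/"gained"/"lost")
#   row                  - remembers the last selected table row so it can be re-selected after a refresh
#   EditedReminderCheck  - set when a reminder has (potentially) been edited, so the data is re-extracted when focus returns
#   ReminderTable_Count / ExtractDetails_Count - simple iteration counters used in the debug messages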
focus = "initial"
row = 0
EditedReminderCheck = False
ReminderTable_Count = 0
ExtractDetails_Count = 0
class MyRunnable(Runnable):
def __init__(self, theFrame):
self.theFrame = theFrame
# noinspection PyMethodMayBeStatic
def run(self):
global debug
myPrint("DB","Inside %s MyRunnable.... About to trigger WindowClosing event to close %s" %(myModuleID,myModuleID))
try:
self.theFrame.dispatchEvent(WindowEvent(self.theFrame, WindowEvent.WINDOW_CLOSING))
myPrint("DB","Back from pushing a WINDOW_CLOSING Event to %s...." %myModuleID)
except:
dump_sys_error_to_md_console_and_errorlog()
myPrint("B","@@ ERROR pushing a WINDOW_CLOSING Event to %s.... :-< " %myModuleID)
return
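# MyEventListener is registered with Moneydance as an AppEventListener. It watches for dataset open/close events and,
# via reflection on the extension's own FeatureModule, can detect that the extension has been uninstalled; in either
# case it pushes a WINDOW_CLOSING event (on a new Thread, via MyRunnable above) so this window shuts itself down cleanly.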
class MyEventListener(AppEventListener):
def __init__(self, theFrame):
self.alreadyClosed = False
self.theFrame = theFrame
self.myModuleID = myModuleID
def getMyself(self):
fm = moneydance.getModuleForID(self.myModuleID)
if fm is None: return None, None
try:
pyo = fm.getClass().getDeclaredField("extensionObject")
pyo.setAccessible(True)
pyObject = pyo.get(fm)
pyo.setAccessible(False)
except:
myPrint("DB","Error retrieving my own Python extension object..?")
dump_sys_error_to_md_console_and_errorlog()
return None, None
return fm, pyObject
# noinspection PyMethodMayBeStatic
def handleEvent(self, appEvent):
global debug
myPrint("DB", "I am .handleEvent() within %s" %(classPrinter("MoneydanceAppListener", self.theFrame.MoneydanceAppListener)))
if self.alreadyClosed:
myPrint("DB","....I'm actually still here (MD EVENT %s CALLED).. - Ignoring and returning back to MD...." %(appEvent))
return
# MD doesn't call .unload() or .cleanup(), so if uninstalled I need to close myself
fm, pyObject = self.getMyself()
myPrint("DB", "Checking myself: %s : %s" %(fm, pyObject))
# if (fm is None or pyObject is None) and appEvent != "md:app:exiting":
if (fm is None or (self.theFrame.isRunTimeExtension and pyObject is None)) and appEvent != "md:app:exiting":
myPrint("B", "@@ ALERT - I've detected that I'm no longer installed as an extension - I will deactivate.. (switching event code to :close)")
appEvent = "%s:customevent:close" %self.myModuleID
        # I am only closing this extension when a new Dataset is opened.. I was calling it on MD Close/Exit, but it seemed to cause an Exception...
if (appEvent == "md:file:closing"
or appEvent == "md:file:closed"
or appEvent == "md:file:opening"
or appEvent == "md:app:exiting"):
myPrint("DB","@@ Ignoring MD handleEvent: %s" %(appEvent))
elif (appEvent == "md:file:opened" or appEvent == "%s:customevent:close" %self.myModuleID):
if debug:
myPrint("DB","MD event %s triggered.... Will call MyRunnable (on a new Thread) to push a WINDOW_CLOSING Event to %s to close itself (while I exit back to MD quickly) ...." %(appEvent, self.myModuleID))
else:
myPrint("B","Moneydance triggered event %s triggered - So I am closing %s now...." %(appEvent, self.myModuleID))
self.alreadyClosed = True
try:
t = Thread(MyRunnable(self.theFrame))
t.start()
myPrint("DB","Back from calling MyRunnable to push a WINDOW_CLOSING Event to %s.... ;-> ** I'm getting out quick! **" %(self.myModuleID))
except:
dump_sys_error_to_md_console_and_errorlog()
myPrint("B","@@ ERROR calling MyRunnable to push a WINDOW_CLOSING Event to %s.... :-< ** I'm getting out quick! **" %(self.myModuleID))
if not debug: myPrint("DB","Returning back to Moneydance after calling for %s to close...." %self.myModuleID)
return
myPrint("DB","@@ Detected MD handleEvent: %s" %(appEvent))
# md:file:closing The Moneydance file is being closed
# md:file:closed The Moneydance file has closed
# md:file:opening The Moneydance file is being opened
# md:file:opened The Moneydance file has opened
# md:file:presave The Moneydance file is about to be saved
# md:file:postsave The Moneydance file has been saved
# md:app:exiting Moneydance is shutting down
# md:account:select An account has been selected by the user
# md:account:root The root account has been selected
# md:graphreport An embedded graph or report has been selected
# md:viewbudget One of the budgets has been selected
# md:viewreminders One of the reminders has been selected
# md:licenseupdated The user has updated the license
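# WindowListener handles the main frame's lifecycle and focus events: closing the window terminates the script,
# windowClosed() detaches the Moneydance AppEventListener / HomePageView references, and regaining focus after a
# reminder has been edited triggers a fresh data extract so the table stays up to date.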
class WindowListener(WindowAdapter):
def __init__(self, theFrame):
self.theFrame = theFrame # type: MyJFrame
def windowClosing(self, WindowEvent): # noqa
global debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
terminate_script()
def windowClosed(self, WindowEvent): # noqa
global debug
myPrint("D","In ", inspect.currentframe().f_code.co_name, "()")
self.theFrame.isActiveInMoneydance = False
myPrint("DB","applistener is %s" %(classPrinter("MoneydanceAppListener", self.theFrame.MoneydanceAppListener)))
if self.theFrame.MoneydanceAppListener is not None:
try:
moneydance.removeAppEventListener(self.theFrame.MoneydanceAppListener)
myPrint("DB","\n@@@ Removed my MD App Listener... %s\n" %(classPrinter("MoneydanceAppListener", self.theFrame.MoneydanceAppListener)))
self.theFrame.MoneydanceAppListener = None
except:
myPrint("B","FAILED to remove my MD App Listener... %s" %(classPrinter("MoneydanceAppListener", self.theFrame.MoneydanceAppListener)))
dump_sys_error_to_md_console_and_errorlog()
if self.theFrame.HomePageViewObj is not None:
self.theFrame.HomePageViewObj.unload()
myPrint("DB","@@ Called HomePageView.unload() and Removed reference to HomePageView %s from MyJFrame()...@@\n" %(classPrinter("HomePageView", self.theFrame.HomePageViewObj)))
self.theFrame.HomePageViewObj = None
myPrint("D","Exit ", inspect.currentframe().f_code.co_name, "()")
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def windowGainedFocus(self, WindowEvent): # noqa
global focus, table, row, debug, EditedReminderCheck
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
if focus == "lost":
focus = "gained"
if EditedReminderCheck: # Disable refresh data on all gained-focus events, just refresh if Reminder is Edited...
# To always refresh data remove this if statement and always run ExtractDetails(1)
myPrint("DB", "pre-build_the_data_file()")
build_the_data_file(1) # Re-extract data when window focus gained - assume something changed
myPrint("DB", "back from build_the_data_file(), gained focus, row: ", row)
EditedReminderCheck = False
if table.getRowCount() > 0:
table.setRowSelectionInterval(0, row)
cellRect = table.getCellRect(row, 0, True)
table.scrollRectToVisible(cellRect) # force the scrollpane to make the row visible
table.requestFocus()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def windowLostFocus(self, WindowEvent): # noqa
global focus, table, row, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
row = table.getSelectedRow()
if focus == "gained": focus = "lost"
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
WL = WindowListener(list_future_reminders_frame_)
class MouseListener(MouseAdapter):
# noinspection PyMethodMayBeStatic
def mousePressed(self, event):
global table, row, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
clicks = event.getClickCount()
if clicks == 2:
row = table.getSelectedRow()
index = table.getValueAt(row, 0)
ShowEditForm(index)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
ML = MouseListener()
class EnterAction(AbstractAction):
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def actionPerformed(self, event):
global focus, table, row, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
row = table.getSelectedRow()
index = table.getValueAt(row, 0)
ShowEditForm(index)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
class CloseAction(AbstractAction):
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def actionPerformed(self, event):
global list_future_reminders_frame_, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
terminate_script()
return
class ExtractMenuAction():
def __init__(self):
pass
# noinspection PyMethodMayBeStatic
def extract_or_close(self):
global list_future_reminders_frame_, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
myPrint("D", "inside ExtractMenuAction() ;->")
terminate_script()
class RefreshMenuAction():
def __init__(self):
pass
# noinspection PyMethodMayBeStatic
def refresh(self):
global list_future_reminders_frame_, table, row, debug
row = 0 # reset to row 1
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "\npre-extract details(1), row: ", row)
build_the_data_file(1) # Re-extract data
myPrint("D", "back from extractdetails(1), row: ", row)
if table.getRowCount() > 0:
table.setRowSelectionInterval(0, row)
table.requestFocus()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
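# MyJTable specialises JTable for this display: cells are read-only, each column gets a renderer driven by headerFormats,
# a custom row sorter copes with the formatted number strings, and prepareRenderer() paints alternate rows light grey.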
class MyJTable(JTable):
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
def __init__(self, tableModel):
global debug
super(JTable, self).__init__(tableModel)
self.fixTheRowSorter()
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def isCellEditable(self, row, column): # noqa
return False
# Rendering depends on row (i.e. security's currency) as well as column
# noinspection PyUnusedLocal
# noinspection PyMethodMayBeStatic
def getCellRenderer(self, row, column): # noqa
global headerFormats
if column == 0:
renderer = MyPlainNumberRenderer()
elif headerFormats[column][0] == Number:
renderer = MyNumberRenderer()
else:
renderer = DefaultTableCellRenderer()
renderer.setHorizontalAlignment(headerFormats[column][1])
return renderer
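    # MyTextNumberComparator sorts the table columns sensibly: "%" compares raw numbers, "N" strips currency formatting
    # from the displayed strings before comparing numerically, and anything else falls back to a case-insensitive text sort.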
class MyTextNumberComparator(Comparator):
lSortNumber = False
lSortRealNumber = False
def __init__(self, sortType):
if sortType == "N":
self.lSortNumber = True
elif sortType == "%":
self.lSortRealNumber = True
else:
self.lSortNumber = False
def compare(self, str1, str2):
global decimalCharSep
validString = "-0123456789" + decimalCharSep # Yes this will strip % sign too, but that still works
# if debug: print str1, str2, self.lSortNumber, self.lSortRealNumber, type(str1), type(str2)
if isinstance(str1, (float,int)) or isinstance(str2,(float,int)):
if str1 is None or str1 == "": str1 = 0
if str2 is None or str2 == "": str2 = 0
if (str1) > (str2):
return 1
elif str1 == str2:
return 0
else:
return -1
if self.lSortNumber:
# strip non numerics from string so can convert back to float - yes, a bit of a reverse hack
conv_string1 = ""
if str1 is None or str1 == "": str1 = "0"
if str2 is None or str2 == "": str2 = "0"
for char in str1:
if char in validString:
conv_string1 = conv_string1 + char
conv_string2 = ""
for char in str2:
if char in validString:
conv_string2 = conv_string2 + char
str1 = float(conv_string1)
str2 = float(conv_string2)
if str1 > str2:
return 1
elif str1 == str2:
return 0
else:
return -1
elif self.lSortRealNumber:
if float(str1) > float(str2):
return 1
elif str1 == str2:
return 0
else:
return -1
else:
if str1.upper() > str2.upper():
return 1
elif str1.upper() == str2.upper():
return 0
else:
return -1
# enddef
def fixTheRowSorter(self): # by default everything gets converted to strings. We need to fix this and code for my string number formats
sorter = TableRowSorter()
self.setRowSorter(sorter)
sorter.setModel(self.getModel())
for _i in range(0, self.getColumnCount()):
if _i == 0:
sorter.setComparator(_i, self.MyTextNumberComparator("%"))
            elif _i == 3:   # the NetAmount column sorts as a (formatted) number
sorter.setComparator(_i, self.MyTextNumberComparator("N"))
else:
sorter.setComparator(_i, self.MyTextNumberComparator("T"))
self.getRowSorter().toggleSortOrder(1)
# make Banded rows
def prepareRenderer(self, renderer, row, column): # noqa
lightLightGray = Color(0xDCDCDC)
# noinspection PyUnresolvedReferences
component = super(MyJTable, self).prepareRenderer(renderer, row, column)
if not self.isRowSelected(row):
component.setBackground(self.getBackground() if row % 2 == 0 else lightLightGray)
return component
# This copies the standard class and just changes the colour to RED if it detects a negative - leaves field intact
# noinspection PyArgumentList
class MyNumberRenderer(DefaultTableCellRenderer):
global baseCurrency
def __init__(self):
super(DefaultTableCellRenderer, self).__init__()
def setValue(self, value):
global decimalCharSep
myGreen = Color(0,102,0)
if isinstance(value, (float,int)):
if value < 0.0:
self.setForeground(Color.RED)
else:
# self.setForeground(Color.DARK_GRAY)
self.setForeground(myGreen) # DARK_GREEN
self.setText(baseCurrency.formatFancy(int(value*100), decimalCharSep, True))
else:
self.setText(str(value))
return
# noinspection PyArgumentList
class MyPlainNumberRenderer(DefaultTableCellRenderer):
global baseCurrency
def __init__(self):
super(DefaultTableCellRenderer, self).__init__()
def setValue(self, value):
self.setText(str(value))
return
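# ReminderTable() builds (or refreshes) the on-screen JTable. On the first call (ind=0) it sizes the window and builds the
# menu bar, key bindings and scrollpane; on subsequent calls (ind=1) it just swaps a freshly built table into the existing
# viewport. Saved column widths are validated against the defaults and re-applied, and a ColumnChangeListener keeps
# capturing any widths the user drags so they can be saved for next time.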
def ReminderTable(tabledata, ind):
global list_future_reminders_frame_, scrollpane, table, row, debug, ReminderTable_Count, csvheaderline, lDisplayOnly
global _column_widths_LFR, daysToLookForward_LFR, saveStatusLabel
ReminderTable_Count += 1
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", ind, " - On iteration/call: ", ReminderTable_Count)
myDefaultWidths = [0,95,400,100]
validCount=0
lInvalidate=True
if _column_widths_LFR is not None and isinstance(_column_widths_LFR,(list)) and len(_column_widths_LFR) == len(myDefaultWidths):
# if sum(_column_widths_LFR)<1:
for width in _column_widths_LFR:
if width >= 0 and width <= 1000: # noqa
validCount += 1
if validCount == len(myDefaultWidths): lInvalidate=False
if lInvalidate:
myPrint("DB","Found invalid saved columns = resetting to defaults")
myPrint("DB","Found: %s" %_column_widths_LFR)
myPrint("DB","Resetting to: %s" %myDefaultWidths)
_column_widths_LFR = myDefaultWidths
else:
myPrint("DB","Valid column widths loaded - Setting to: %s" %_column_widths_LFR)
myDefaultWidths = _column_widths_LFR
# allcols = col0 + col1 + col2 + col3 + col4 + col5 + col6 + col7 + col8 + col9 + col10 + col11 + col12 + col13 + col14 + col15 + col16 + col17
allcols = sum(myDefaultWidths)
screenSize = Toolkit.getDefaultToolkit().getScreenSize()
# button_width = 220
# button_height = 40
# frame_width = min(screenSize.width-20, allcols + 100)
# frame_height = min(screenSize.height, 900)
frame_width = min(screenSize.width-20, max(1024,int(round(moneydance_ui.firstMainFrame.getSize().width *.95,0))))
frame_height = min(screenSize.height-20, max(768, int(round(moneydance_ui.firstMainFrame.getSize().height *.95,0))))
frame_width = min( allcols+20, frame_width)
# panel_width = frame_width - 50
# button_panel_height = button_height + 5
if ind == 1: scrollpane.getViewport().remove(table) # On repeat, just remove/refresh the table & rebuild the viewport
colnames = csvheaderline
table = MyJTable(DefaultTableModel(tabledata, colnames))
if ind == 0: # Function can get called multiple times; only set main frames up once
JFrame.setDefaultLookAndFeelDecorated(True)
# list_future_reminders_frame_ = JFrame("Listing future reminders - StuWareSoftSystems(build: %s)..." % version_build)
list_future_reminders_frame_.setTitle("Listing future reminders - StuWareSoftSystems(build: %s)..." % version_build)
list_future_reminders_frame_.setName("%s_main" %(myModuleID))
# list_future_reminders_frame_.setLayout(FlowLayout())
if (not Platform.isMac()):
moneydance_ui.getImages()
list_future_reminders_frame_.setIconImage(MDImages.getImage(moneydance_ui.getMain().getSourceInformation().getIconResource()))
# list_future_reminders_frame_.setPreferredSize(Dimension(frame_width, frame_height))
# frame.setExtendedState(JFrame.MAXIMIZED_BOTH)
list_future_reminders_frame_.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE)
shortcut = Toolkit.getDefaultToolkit().getMenuShortcutKeyMaskEx()
# Add standard CMD-W keystrokes etc to close window
list_future_reminders_frame_.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_W, shortcut), "close-window")
list_future_reminders_frame_.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_F4, shortcut), "close-window")
list_future_reminders_frame_.getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW).put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), "close-window")
list_future_reminders_frame_.getRootPane().getActionMap().put("close-window", CloseAction())
list_future_reminders_frame_.addWindowFocusListener(WL)
list_future_reminders_frame_.addWindowListener(WL)
if Platform.isOSX():
save_useScreenMenuBar= System.getProperty("apple.laf.useScreenMenuBar")
System.setProperty("apple.laf.useScreenMenuBar", "false")
mb = JMenuBar()
menuO = JMenu("<html><B>OPTIONS</b></html>")
menuItemR = JMenuItem("Refresh Data/Default Sort")
menuItemR.setToolTipText("Refresh (re-extract) the data, revert to default sort order....")
menuItemR.addActionListener(DoTheMenu(menuO))
menuItemR.setEnabled(True)
menuO.add(menuItemR)
menuItemL = JMenuItem("Change look forward days")
menuItemL.setToolTipText("Change the days to look forward")
menuItemL.addActionListener(DoTheMenu(menuO))
menuItemL.setEnabled(True)
menuO.add(menuItemL)
menuItemRC = JMenuItem("Reset default Column Widths")
menuItemRC.setToolTipText("Reset default Column Widths")
menuItemRC.addActionListener(DoTheMenu(menuO))
menuItemRC.setEnabled(True)
menuO.add(menuItemRC)
menuItemDEBUG = JCheckBoxMenuItem("Debug")
menuItemDEBUG.addActionListener(DoTheMenu(menuO))
menuItemDEBUG.setToolTipText("Enables script to output debug information (internal technical stuff)")
menuItemDEBUG.setSelected(debug)
menuO.add(menuItemDEBUG)
menuItemE = JMenuItem("Close Window")
menuItemE.setToolTipText("Exit and close the window")
menuItemE.addActionListener(DoTheMenu(menuO))
menuItemE.setEnabled(True)
menuO.add(menuItemE)
mb.add(menuO)
menuH = JMenu("<html><B>ABOUT</b></html>")
menuItemA = JMenuItem("About")
menuItemA.setToolTipText("About...")
menuItemA.addActionListener(DoTheMenu(menuH))
menuItemA.setEnabled(True)
menuH.add(menuItemA)
mb.add(menuH)
# mb.add(Box.createHorizontalGlue())
mb.add(Box.createRigidArea(Dimension(40, 0)))
formatDate = DateUtil.incrementDate(DateUtil.getStrippedDateInt(),0,0,daysToLookForward_LFR)
formatDate = str(formatDate/10000).zfill(4) + "-" + str((formatDate/100)%100).zfill(2) + "-" + str(formatDate%100).zfill(2)
lblDays = JLabel("** Looking forward %s days to %s **" %(daysToLookForward_LFR, formatDate))
lblDays.setBackground(Color.WHITE)
lblDays.setForeground(Color.RED)
mb.add(lblDays)
saveStatusLabel = lblDays
# mb.add(Box.createRigidArea(Dimension(20, 0)))
list_future_reminders_frame_.setJMenuBar(mb)
if Platform.isOSX():
System.setProperty("apple.laf.useScreenMenuBar", save_useScreenMenuBar) # noqa
table.getTableHeader().setReorderingAllowed(True) # no more drag and drop columns, it didn't work (on the footer)
table.getTableHeader().setDefaultRenderer(DefaultTableHeaderCellRenderer())
table.selectionMode = ListSelectionModel.SINGLE_SELECTION
fontSize = table.getFont().getSize()+5
table.setRowHeight(fontSize)
table.setRowMargin(0)
table.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke("ENTER"), "Enter")
table.getActionMap().put("Enter", EnterAction())
for _i in range(0, table.getColumnModel().getColumnCount()):
tcm = table.getColumnModel().getColumn(_i)
tcm.setPreferredWidth(myDefaultWidths[_i])
if myDefaultWidths[_i] == 0:
tcm.setMinWidth(0)
tcm.setMaxWidth(0)
tcm.setWidth(0)
cListener1 = ColumnChangeListener(table)
# Put the listener here - else it sets the defaults wrongly above....
table.getColumnModel().addColumnModelListener(cListener1)
table.getTableHeader().setBackground(Color.LIGHT_GRAY)
# table.setAutoCreateRowSorter(True) # DON'T DO THIS - IT WILL OVERRIDE YOUR NICE CUSTOM SORT
table.addMouseListener(ML)
if ind == 0:
scrollpane = JScrollPane(JScrollPane.VERTICAL_SCROLLBAR_ALWAYS, JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS) # On first call, create the scrollpane
scrollpane.setBorder(CompoundBorder(MatteBorder(1, 1, 1, 1, Color.gray), EmptyBorder(0, 0, 0, 0)))
# scrollpane.setPreferredSize(Dimension(frame_width-20, frame_height-20 ))
table.setPreferredScrollableViewportSize(Dimension(frame_width-20, frame_height-100))
#
table.setAutoResizeMode(JTable.AUTO_RESIZE_OFF)
#
scrollpane.setViewportView(table)
if ind == 0:
list_future_reminders_frame_.add(scrollpane)
list_future_reminders_frame_.pack()
list_future_reminders_frame_.setLocationRelativeTo(None)
try:
list_future_reminders_frame_.MoneydanceAppListener = MyEventListener(list_future_reminders_frame_)
moneydance.addAppEventListener(list_future_reminders_frame_.MoneydanceAppListener)
myPrint("DB","@@ added AppEventListener() %s @@" %(classPrinter("MoneydanceAppListener", list_future_reminders_frame_.MoneydanceAppListener)))
except:
myPrint("B","FAILED to add MD App Listener...")
dump_sys_error_to_md_console_and_errorlog()
list_future_reminders_frame_.isActiveInMoneydance = True
if True or Platform.isOSX():
# list_future_reminders_frame_.setAlwaysOnTop(True)
list_future_reminders_frame_.toFront()
list_future_reminders_frame_.setVisible(True)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
ReminderTable_Count -= 1
return
def FormatAmount(oldamount):
# Amount is held as an integer in pence
# Remove - sign if present
if oldamount < 0:
oldamount = oldamount * -1
oldamount = str(oldamount)
    # Ensure at least 3 characters
if len(oldamount) < 3:
oldamount = "000" + oldamount
oldamount = (oldamount)[-3:]
# Extract whole portion of amount
whole = (oldamount)[0:-2]
if len(whole) == 0:
whole = "0"
# Extract decimal part of amount
decimal = (oldamount)[-2:]
declen = len(decimal)
if declen == 0:
decimal = "00"
whole = "0"
if declen == 1:
decimal = "0" + decimal
whole = "0"
    # Insert thousands-separator commas in the whole part
wholelist = list(whole)
listlen = len(wholelist)
if wholelist[0] == "-":
listlen = listlen - 1
listpos = 3
while listpos < listlen:
wholelist.insert(-listpos, ",")
listpos = listpos + 4
listlen = listlen + 1
newwhole = "".join(wholelist)
newamount = newwhole + "." + decimal
return newamount
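# Hedged examples of the formatter above (illustrative values only):
#   FormatAmount(123456) should give "1,234.56", and FormatAmount(-5) gives "0.05"
#   (the minus sign is deliberately stripped before formatting).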
def FormatDate(olddate):
# Date is held as an integer in format YYYYMMDD
olddate = str(olddate)
if len(olddate) < 8:
olddate = "00000000"
year = olddate[0:4]
month = olddate[4:6]
day = olddate[6:8]
newdate = day + "/" + month + "/" + year
if newdate == "00/00/0000":
newdate = "Unavailable"
return newdate
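# Hedged example of the date formatter above: FormatDate(20231225) returns
# "25/12/2023", while a zero or too-short value such as FormatDate(0) falls
# back to "Unavailable".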
def ShowEditForm(item):
global debug, EditedReminderCheck
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
reminders = moneydance_data.getReminders()
reminder = reminders.getAllReminders()[item-1]
myPrint("D", "Calling MD EditRemindersWindow() function...")
EditRemindersWindow.editReminder(None, moneydance_ui, reminder)
EditedReminderCheck = True
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
if build_the_data_file(0):
# saveStatusLabel = None
#
focus = "gained" # noqa
if table.getRowCount() > 0:
table.setRowSelectionInterval(0, row)
table.requestFocus()
else:
myPopupInformationBox(list_future_reminders_frame_, "You have no reminders to display!", myScriptName)
# ENDDEF
if not list_future_reminders_frame_.isActiveInMoneydance:
destroyOldFrames(myModuleID)
myPrint("B", "StuWareSoftSystems - %s script ending......" %myScriptName)
| 40.130511
| 386
| 0.676022
|
67b4fe96574a0f27f540c675daec327b9241132d
| 8910
|
py
|
Python
|
gscommands.py
|
charlievieth/GoSubl
|
07dec6a5fef09e4e1d686cfe5d586edcda003381
|
[
"MIT"
] | null | null | null |
gscommands.py
|
charlievieth/GoSubl
|
07dec6a5fef09e4e1d686cfe5d586edcda003381
|
[
"MIT"
] | 1
|
2020-11-06T22:01:04.000Z
|
2020-11-06T22:01:04.000Z
|
gscommands.py
|
charlievieth/GoSubl
|
07dec6a5fef09e4e1d686cfe5d586edcda003381
|
[
"MIT"
] | null | null | null |
from gosubl import gs
from gosubl import gspatch
from gosubl import mg9
import datetime
import os
import re
import sublime
import sublime_plugin
DOMAIN = "GoSublime"
GENERATED_CODE_RE = re.compile(
r"^\/\/ Code generated .* DO NOT EDIT\.$",
re.MULTILINE,
)
# TODO: make this configurable
FMT_IGNORED_EXTS = tuple([
".pb.go"
])
class GsCommentForwardCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.run_command("toggle_comment", {"block": False})
self.view.run_command("move", {"by": "lines", "forward": True})
class GsStartNextLineCommentCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.run_command(
"run_macro_file", {"file": "Packages/Default/Add Line.sublime-macro"}
)
self.view.run_command("toggle_comment", {"block": False})
class GsFmtCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if gs.setting("fmt_enabled") is True:
fn = self.view.file_name() or ""
if self.is_ignored(fn):
self.log_ignored("extension", fn)
return False
if fn.endswith(".go") or gs.is_go_source_view(self.view):
return True
return False
def run(self, edit):
vsize = self.view.size()
src = self.view.substr(sublime.Region(0, vsize))
if src.isspace():
return
# Check if the file is ignored by extension or is generated.
file_name = self.view.file_name()
if self.is_ignored(file_name):
self.log_ignored("extension", file_name)
return
if self.is_generated(src):
self.log_ignored("generated", file_name)
return
res, err = mg9.fmt(file_name, src)
if err:
gs.println(DOMAIN, "cannot fmt file. error: `%s'" % err)
return
if res.get("no_change", False):
return
src = res.get("src", "")
if not src:
gs.println(DOMAIN, "cannot fmt file. it appears to be empty")
return
_, err = gspatch.merge(self.view, vsize, src, edit)
if err:
msg = "PANIC: Cannot fmt file. Check your source for errors (and maybe undo any changes)."
sublime.error_message("%s: %s: Merge failure: `%s'" % (DOMAIN, msg, err))
@classmethod
def is_generated(cls, src: str) -> bool:
# require the "Code Generated" comment to occur before the "package"
# statement so that we don't search the entire file
i = src.find("package ")
if i > 32: # min_len = len("// Code generated by DO NOT EDIT.")
return GENERATED_CODE_RE.search(src, 0, i) is not None
return False
@classmethod
def is_ignored(cls, filename: str) -> bool:
return filename is not None and filename.endswith(FMT_IGNORED_EXTS)
@classmethod
def log_ignored(cls, reason: str, file_name: str) -> None:
msg = "fmt: ignoring file ({}): {}".format(
reason, os.path.basename(file_name),
)
gs.println(DOMAIN, msg)
gs.status_message(msg)
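# Hedged illustration of GsFmtCommand.is_generated() above (the generator name is
# illustrative): only the text before the first "package " statement is scanned,
# so a file beginning with
#     // Code generated by protoc-gen-go. DO NOT EDIT.
#     package foo
# is treated as generated and skipped by fmt, whereas the same marker appearing
# after the package clause is not considered.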
class GsFmtSaveCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if gs.setting("fmt_enabled") is True:
fn = self.view.file_name() or ""
return fn.endswith(".go") or gs.is_go_source_view(self.view)
return False
def run(self, edit):
self.view.run_command("gs_fmt")
sublime.set_timeout(lambda: self.view.run_command("save"), 0)
class GsFmtPromptSaveAsCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return gs.is_go_source_view(self.view)
def run(self, edit):
self.view.run_command("gs_fmt")
sublime.set_timeout(lambda: self.view.run_command("prompt_save_as"), 0)
class GsGotoRowColCommand(sublime_plugin.TextCommand):
# WARN (CEV): use the Jumper logic
def run(self, edit, row, col=0):
pt = self.view.text_point(row, col)
r = sublime.Region(pt, pt)
self.view.sel().clear()
self.view.sel().add(r)
self.view.show(pt)
dmn = "gs.focus.%s:%s:%s" % (gs.view_fn(self.view), row, col)
flags = sublime.DRAW_EMPTY_AS_OVERWRITE
show = lambda: self.view.add_regions(dmn, [r], "comment", "bookmark", flags)
hide = lambda: self.view.erase_regions(dmn)
for i in range(3):
m = 300
s = i * m * 2
h = s + m
sublime.set_timeout(show, s)
sublime.set_timeout(hide, h)
# TODO (CEV): this is worthless - remove it and the set_syntax stuff
class GsNewGoFileCommand(sublime_plugin.WindowCommand):
def run(self):
pkg_name = "main"
view = gs.active_valid_go_view()
try:
basedir = gs.basedir_or_cwd(view and view.file_name())
for fn in os.listdir(basedir):
if fn.endswith(".go"):
name, _ = mg9.pkg_name(os.path.join(basedir, fn), "")
if name:
pkg_name = name
break
except Exception:
gs.error_traceback("GsNewGoFile")
self.window.new_file().run_command(
"gs_create_new_go_file", {"pkg_name": pkg_name, "file_name": "main.go"}
)
# TODO (CEV): this is worthless - remove it and the set_syntax stuff
class GsCreateNewGoFileCommand(sublime_plugin.TextCommand):
def run(self, edit, pkg_name, file_name):
view = self.view
view.set_name(file_name)
view.set_syntax_file(gs.tm_path("go"))
view.replace(edit, sublime.Region(0, view.size()), "package %s\n" % pkg_name)
view.sel().clear()
view.sel().add(view.find(pkg_name, 0, sublime.LITERAL))
class GsShowTasksCommand(sublime_plugin.WindowCommand):
def run(self):
ents = []
now = datetime.datetime.now()
m = {}
try:
tasks = gs.task_list()
ents.insert(0, ["", "%d active task(s)" % len(tasks)])
for tid, t in tasks:
cancel_text = ""
if t["cancel"]:
cancel_text = " (cancel task)"
m[len(ents)] = tid
ents.append(
[
"#%s %s%s" % (tid, t["domain"], cancel_text),
t["message"],
"started: %s" % t["start"],
"elapsed: %s" % (now - t["start"]),
]
)
except:
ents = [["", "Failed to gather active tasks"]]
def cb(i, _):
gs.cancel_task(m.get(i, ""))
gs.show_quick_panel(ents, cb)
class GsOpenHomePathCommand(sublime_plugin.WindowCommand):
def run(self, fn):
self.window.open_file(gs.home_path(fn))
class GsOpenDistPathCommand(sublime_plugin.WindowCommand):
def run(self, fn):
self.window.open_file(gs.dist_path(fn))
class GsSanityCheckCommand(sublime_plugin.WindowCommand):
def run(self):
s = "GoSublime Sanity Check\n\n%s" % "\n".join(
mg9.sanity_check_sl(mg9.sanity_check({}, True))
)
gs.show_output("GoSublime", s)
class GsSetOutputPanelContentCommand(sublime_plugin.TextCommand):
def run(self, edit, content, syntax_file, scroll_end, replace):
panel = self.view
panel.set_read_only(False)
if replace:
panel.replace(edit, sublime.Region(0, panel.size()), content)
else:
panel.insert(edit, panel.size(), content + "\n")
panel.sel().clear()
pst = panel.settings()
pst.set("rulers", [])
pst.set("fold_buttons", True)
pst.set("fade_fold_buttons", False)
pst.set("gutter", False)
pst.set("line_numbers", False)
if syntax_file:
if syntax_file == "GsDoc":
panel.set_syntax_file(gs.tm_path("doc"))
panel.run_command("fold_by_level", {"level": 1})
else:
panel.set_syntax_file(syntax_file)
panel.set_read_only(True)
if scroll_end:
panel.show(panel.size())
class GsInsertContentCommand(sublime_plugin.TextCommand):
def run(self, edit, pos, content):
pos = int(pos) # un-fucking-believable
self.view.insert(edit, pos, content)
class GsPatchImportsCommand(sublime_plugin.TextCommand):
def run(self, edit, pos, content, added_path=""):
pos = int(pos) # un-fucking-believable
view = self.view
dirty, err = gspatch.merge(view, pos, content, edit)
if err:
gs.notice_undo(DOMAIN, err, view, dirty)
elif dirty:
k = "last_import_path.%s" % gs.view_fn(self.view)
if added_path:
gs.set_attr(k, added_path)
else:
gs.del_attr(k)
| 31.935484
| 102
| 0.580471
|
c745ab4e1060e8b72708f7bbaa04fa2de5ad9536
| 6,071
|
py
|
Python
|
tests/projects/test_docker_projects.py
|
cclauss/mlflow
|
05f18899df6db3364673d39910cc71bef4b95d27
|
[
"Apache-2.0"
] | 3
|
2019-10-07T01:12:25.000Z
|
2020-07-06T04:27:51.000Z
|
tests/projects/test_docker_projects.py
|
aelmasry/mlflow
|
50efb4e4a7af6c351592f027681243d59f288254
|
[
"Apache-2.0"
] | 15
|
2019-10-07T01:11:46.000Z
|
2022-03-08T23:33:53.000Z
|
tests/projects/test_docker_projects.py
|
aelmasry/mlflow
|
50efb4e4a7af6c351592f027681243d59f288254
|
[
"Apache-2.0"
] | 6
|
2019-11-28T13:23:35.000Z
|
2020-07-08T19:22:12.000Z
|
import os
import mock
import pytest
from databricks_cli.configure.provider import DatabricksConfig
import mlflow
from mlflow.entities import ViewType
from mlflow.projects import ExecutionException, _get_docker_image_uri
from mlflow.store import file_store
from mlflow.utils.mlflow_tags import MLFLOW_PROJECT_ENV, MLFLOW_DOCKER_IMAGE_URI, \
MLFLOW_DOCKER_IMAGE_ID
from tests.projects.utils import TEST_DOCKER_PROJECT_DIR
from tests.projects.utils import docker_example_base_image # pylint: disable=unused-import
from tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import
from mlflow.projects import _project_spec
def _build_uri(base_uri, subdirectory):
if subdirectory != "":
return "%s#%s" % (base_uri, subdirectory)
return base_uri
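# Hedged illustration of the helper above (URI is illustrative only):
#   _build_uri("https://github.com/org/repo", "examples/docker")
# yields "https://github.com/org/repo#examples/docker", while an empty
# subdirectory returns the base URI unchanged.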
@pytest.mark.parametrize("use_start_run", map(str, [0, 1]))
def test_docker_project_execution(
use_start_run,
tmpdir, tracking_uri_mock, docker_example_base_image): # pylint: disable=unused-argument
expected_params = {"use_start_run": use_start_run}
submitted_run = mlflow.projects.run(
TEST_DOCKER_PROJECT_DIR, experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID,
parameters=expected_params, entry_point="test_tracking")
# Validate run contents in the FileStore
run_id = submitted_run.run_id
mlflow_service = mlflow.tracking.MlflowClient()
run_infos = mlflow_service.list_run_infos(
experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID,
run_view_type=ViewType.ACTIVE_ONLY)
assert len(run_infos) == 1
store_run_id = run_infos[0].run_id
assert run_id == store_run_id
run = mlflow_service.get_run(run_id)
assert run.data.params == expected_params
assert run.data.metrics == {"some_key": 3}
exact_expected_tags = {MLFLOW_PROJECT_ENV: "docker"}
approx_expected_tags = {
MLFLOW_DOCKER_IMAGE_URI: "docker-example",
MLFLOW_DOCKER_IMAGE_ID: "sha256:",
}
run_tags = run.data.tags
for k, v in exact_expected_tags.items():
assert run_tags[k] == v
for k, v in approx_expected_tags.items():
assert run_tags[k].startswith(v)
artifacts = mlflow_service.list_artifacts(run_id=run_id)
assert len(artifacts) == 1
@pytest.mark.parametrize("tracking_uri, expected_command_segment", [
(None, "-e MLFLOW_TRACKING_URI=/mlflow/tmp/mlruns"),
("http://some-tracking-uri", "-e MLFLOW_TRACKING_URI=http://some-tracking-uri"),
("databricks://some-profile", "-e MLFLOW_TRACKING_URI=databricks ")
])
@mock.patch('databricks_cli.configure.provider.ProfileConfigProvider')
def test_docker_project_tracking_uri_propagation(
ProfileConfigProvider, tmpdir, tracking_uri,
expected_command_segment, docker_example_base_image): # pylint: disable=unused-argument
mock_provider = mock.MagicMock()
mock_provider.get_config.return_value = \
DatabricksConfig("host", "user", "pass", None, insecure=True)
ProfileConfigProvider.return_value = mock_provider
# Create and mock local tracking directory
local_tracking_dir = os.path.join(tmpdir.strpath, "mlruns")
if tracking_uri is None:
tracking_uri = local_tracking_dir
old_uri = mlflow.get_tracking_uri()
try:
mlflow.set_tracking_uri(tracking_uri)
with mock.patch("mlflow.tracking.utils._get_store") as _get_store_mock:
_get_store_mock.return_value = file_store.FileStore(local_tracking_dir)
mlflow.projects.run(
TEST_DOCKER_PROJECT_DIR, experiment_id=file_store.FileStore.DEFAULT_EXPERIMENT_ID)
finally:
mlflow.set_tracking_uri(old_uri)
def test_docker_uri_mode_validation(
tracking_uri_mock, docker_example_base_image): # pylint: disable=unused-argument
with pytest.raises(ExecutionException):
mlflow.projects.run(TEST_DOCKER_PROJECT_DIR, backend="databricks")
@mock.patch('mlflow.projects._get_git_commit')
def test_docker_image_uri_with_git(get_git_commit_mock):
get_git_commit_mock.return_value = '1234567890'
image_uri = _get_docker_image_uri("my_project", "my_workdir")
assert image_uri == "my_project:1234567"
get_git_commit_mock.assert_called_with('my_workdir')
@mock.patch('mlflow.projects._get_git_commit')
def test_docker_image_uri_no_git(get_git_commit_mock):
get_git_commit_mock.return_value = None
image_uri = _get_docker_image_uri("my_project", "my_workdir")
assert image_uri == "my_project"
get_git_commit_mock.assert_called_with('my_workdir')
def test_docker_valid_project_backend_local():
work_dir = "./examples/docker"
project = _project_spec.load_project(work_dir)
mlflow.projects._validate_docker_env(project)
def test_docker_invalid_project_backend_local():
work_dir = "./examples/docker"
project = _project_spec.load_project(work_dir)
project.name = None
with pytest.raises(ExecutionException):
mlflow.projects._validate_docker_env(project)
@pytest.mark.parametrize("artifact_uri, host_artifact_uri, container_artifact_uri, should_mount", [
("/tmp/mlruns/artifacts", "/tmp/mlruns/artifacts", "/tmp/mlruns/artifacts", True),
("s3://my_bucket", None, None, False),
("file:///tmp/mlruns/artifacts", "/tmp/mlruns/artifacts", "/tmp/mlruns/artifacts", True),
("./mlruns", os.path.abspath("./mlruns"), "/mlflow/projects/code/mlruns", True)
])
def test_docker_mount_local_artifact_uri(artifact_uri, host_artifact_uri,
container_artifact_uri, should_mount):
active_run = mock.MagicMock()
run_info = mock.MagicMock()
run_info.run_id = "fake_run_id"
run_info.experiment_id = "fake_experiment_id"
run_info.artifact_uri = artifact_uri
active_run.info = run_info
image = mock.MagicMock()
image.tags = ["image:tag"]
docker_command = mlflow.projects._get_docker_command(image, active_run)
docker_volume_expected = "-v {}:{}".format(host_artifact_uri, container_artifact_uri)
assert (docker_volume_expected in " ".join(docker_command)) == should_mount
| 41.582192
| 99
| 0.748641
|
c232a78fc7e8b959a75d7390ae2b8dd837067df8
| 88
|
py
|
Python
|
tests/test_cli.py
|
vivainio/cftool
|
e3d53b3849e271c6ab93df426b5f543f38af5617
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
vivainio/cftool
|
e3d53b3849e271c6ab93df426b5f543f38af5617
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
vivainio/cftool
|
e3d53b3849e271c6ab93df426b5f543f38af5617
|
[
"MIT"
] | null | null | null |
from cfut.cli import do_ecr_push
def test_do_ecr_publish(init):
do_ecr_push(None)
| 14.666667
| 32
| 0.784091
|
c271c44f47b4dba0e192db479ec207eb8681558c
| 3248
|
py
|
Python
|
src/ansiblemdgen/AutoDocumenterIndex.py
|
murphypetercl/ansible-mdgen
|
86668f29bed5d206bca01fcbf568c8cd45cd8b4d
|
[
"MIT"
] | 14
|
2021-01-27T23:54:44.000Z
|
2022-03-30T01:27:54.000Z
|
src/ansiblemdgen/AutoDocumenterIndex.py
|
murphypetercl/ansible-mdgen
|
86668f29bed5d206bca01fcbf568c8cd45cd8b4d
|
[
"MIT"
] | 7
|
2021-01-29T20:31:50.000Z
|
2021-03-31T17:10:00.000Z
|
src/ansiblemdgen/AutoDocumenterIndex.py
|
murphypetercl/ansible-mdgen
|
86668f29bed5d206bca01fcbf568c8cd45cd8b4d
|
[
"MIT"
] | 1
|
2022-03-18T18:59:41.000Z
|
2022-03-18T18:59:41.000Z
|
#!/usr/bin/env python3
from ansiblemdgen.Config import SingleConfig
import sys
import yaml
import os
from os import walk
from ansiblemdgen.Utils import SingleLog,FileUtils
from mdutils.mdutils import MdUtils
class IndexWriter:
def __init__(self):
self.config = SingleConfig()
self.log = SingleLog()
def render(self):
self.createIndexMDFile()
def createIndexMDFile(self):
self.log.info("(createIndexMDFile) Create Index MD File")
role_name = self.config.get_base_dir()[self.config.get_base_dir().rfind('/')+1:]
page_title = "Role: "+role_name
mdFile = MdUtils(file_name=self.config.get_output_dir()+"/index.md")
self.createMDFileContent(mdFile)
mdFile.create_md_file()
self.log.info("(createIndexMDFile) Create Index MD File Complete")
def createMDFileContent(self, mdFile):
author = ''
description = ''
company = ''
license = ''
min_ansible_version = ''
dependencies = []
galaxy_metafile = self.config.get_base_dir()+'/meta/main.yml'
if os.path.isfile(galaxy_metafile):
with open(galaxy_metafile, 'r') as stream:
try:
metadata = yaml.safe_load(stream)
author = metadata.get("galaxy_info").get('author')
description = metadata.get("galaxy_info").get('description')
company = metadata.get("galaxy_info").get('company')
license = metadata.get("galaxy_info").get('license')
min_ansible_version = metadata.get("galaxy_info").get('min_ansible_version')
dependencies = metadata.get('dependencies')
except yaml.YAMLError as exc:
print(exc)
else:
self.log.info("(createIndexMDFile) No meta/main.yml file")
role_name = self.config.get_base_dir()[self.config.get_base_dir().rfind('/')+1:]
mdFile.new_header(level=1, title='Home')
mdFile.new_line("---")
mdFile.new_header(level=2, title='Role Name')
mdFile.new_line(role_name)
mdFile.new_line()
mdFile.new_line("---")
mdFile.new_header(level=2, title='Description')
mdFile.new_line(description)
mdFile.new_line()
mdFile.new_line("---")
mdFile.new_header(level=2, title='Dependencies')
if dependencies != []:
for dependency in dependencies:
if isinstance(dependency, dict):
for dep_part in dependency:
mdFile.new_line("> "+dep_part+": "+dependency[dep_part])
else:
mdFile.new_line("> "+ dependency)
mdFile.new_line()
else:
mdFile.new_line('None')
mdFile.new_line()
mdFile.new_line("---")
mdFile.new_header(level=2, title='Information')
table_entries = ["Author", "Company", "License","Minimum Ansible Version"]
table_entries.extend([author, company, license, str(min_ansible_version)])
mdFile.new_line()
mdFile.new_table(columns=4, rows=2, text=table_entries, text_align='center')
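# Hedged sketch of the meta/main.yml layout this writer expects (field names are
# taken from the reads above; the values are illustrative only):
#   galaxy_info:
#     author: jane
#     description: Example role
#     company: ACME
#     license: MIT
#     min_ansible_version: "2.9"
#   dependencies: []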
| 32.808081
| 96
| 0.593596
|
8496ae51fad6daae86de2ec33d946cefd84dd494
| 4695
|
py
|
Python
|
qiskit/optimization/ising/stable_set.py
|
IanJoel/qiskit-aqua
|
7707172d01f0539358f1ce2406f307e830105303
|
[
"Apache-2.0"
] | null | null | null |
qiskit/optimization/ising/stable_set.py
|
IanJoel/qiskit-aqua
|
7707172d01f0539358f1ce2406f307e830105303
|
[
"Apache-2.0"
] | null | null | null |
qiskit/optimization/ising/stable_set.py
|
IanJoel/qiskit-aqua
|
7707172d01f0539358f1ce2406f307e830105303
|
[
"Apache-2.0"
] | 2
|
2020-02-13T02:17:58.000Z
|
2020-08-09T07:56:25.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Convert stable set instances into Pauli list. We read instances in
the Gset format, see https://web.stanford.edu/~yyye/yyye/Gset/ , for
compatibility with the maxcut format, but the weights on the edges
are not really used and are always assumed to be 1. The
graph is represented by an adjacency matrix.
"""
import logging
import warnings
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator
logger = logging.getLogger(__name__)
def get_operator(w):
"""Generate Hamiltonian for the maximum stable set in a graph.
Args:
w (numpy.ndarray) : adjacency matrix.
Returns:
tuple(WeightedPauliOperator, float): operator for the Hamiltonian and a
constant shift for the obj function.
"""
num_nodes = len(w)
pauli_list = []
shift = 0
for i in range(num_nodes):
for j in range(i+1, num_nodes):
if w[i, j] != 0:
x_p = np.zeros(num_nodes, dtype=np.bool)
z_p = np.zeros(num_nodes, dtype=np.bool)
z_p[i] = True
z_p[j] = True
pauli_list.append([1.0, Pauli(z_p, x_p)])
shift += 1
for i in range(num_nodes):
degree = np.sum(w[i, :])
x_p = np.zeros(num_nodes, dtype=np.bool)
z_p = np.zeros(num_nodes, dtype=np.bool)
z_p[i] = True
pauli_list.append([degree - 1/2, Pauli(z_p, x_p)])
return WeightedPauliOperator(paulis=pauli_list), shift - num_nodes/2
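# Hedged usage sketch (not part of the original module; values illustrative):
#   w = np.array([[0., 1.], [1., 0.]])        # single edge between two nodes
#   qubit_op, offset = get_operator(w)
# qubit_op is a WeightedPauliOperator whose minimum-energy state encodes the
# maximum stable set, and offset is the constant shift of the objective.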
def stable_set_value(x, w):
"""Compute the value of a stable set, and its feasibility.
Args:
x (numpy.ndarray): binary string in original format -- not
graph solution!.
w (numpy.ndarray): adjacency matrix.
Returns:
tuple(float, bool): size of the stable set, and Boolean indicating
feasibility.
"""
assert len(x) == w.shape[0]
feasible = True
num_nodes = w.shape[0]
for i in range(num_nodes):
for j in range(i+1, num_nodes):
if w[i, j] != 0 and x[i] == 0 and x[j] == 0:
feasible = False
break
return len(x) - np.sum(x), feasible
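# Hedged example continuing the sketch above: with x = np.array([0, 1]) only
# node 0 is kept (x == 0 means "in the set"), so stable_set_value(x, w)
# returns (1, True); x = np.array([0, 0]) keeps both endpoints of the edge
# and is reported infeasible.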
def get_graph_solution(x):
"""Get graph solution from binary string.
Args:
x (numpy.ndarray) : binary string as numpy array.
Returns:
numpy.ndarray: graph solution as binary numpy array.
"""
return 1 - x
def random_graph(n, edge_prob=0.5, savefile=None, seed=None):
""" random graph """
# pylint: disable=import-outside-toplevel
from .common import random_graph as redirect_func
warnings.warn("random_graph function has been moved to "
"qiskit.optimization.ising.common, "
"the method here will be removed after Aqua 0.7+",
DeprecationWarning)
return redirect_func(n=n, weight_range=2, edge_prob=edge_prob,
negative_weight=False, savefile=savefile, seed=seed)
def parse_gset_format(filename):
""" parse gset format """
# pylint: disable=import-outside-toplevel
from .common import parse_gset_format as redirect_func
warnings.warn("parse_gset_format function has been moved to "
"qiskit.optimization.ising.common, "
"the method here will be removed after Aqua 0.7+",
DeprecationWarning)
return redirect_func(filename)
def sample_most_likely(state_vector):
""" sample most likely """
# pylint: disable=import-outside-toplevel
from .common import sample_most_likely as redirect_func
warnings.warn("sample_most_likely function has been moved to "
"qiskit.optimization.ising.common, "
"the method here will be removed after Aqua 0.7+",
DeprecationWarning)
return redirect_func(state_vector=state_vector)
def get_stable_set_qubitops(w):
""" get stable set qubit ops """
warnings.warn("get_stable_set_qubitops function has been changed to get_operator"
"the method here will be removed after Aqua 0.7+",
DeprecationWarning)
return get_operator(w)
| 33.297872
| 85
| 0.644941
|
fceb36dd10a7c2e8d6eea587eff7fe47021932ef
| 3741
|
py
|
Python
|
HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/HT_natural_convection.py
|
CarlGriffinsteed/UVM-ME144-Heat-Transfer
|
9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
|
[
"CC-BY-3.0"
] | 7
|
2017-06-02T20:31:22.000Z
|
2021-04-05T13:52:33.000Z
|
HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/HT_natural_convection.py
|
CarlGriffinsteed/UVM-ME144-Heat-Transfer
|
9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
|
[
"CC-BY-3.0"
] | null | null | null |
HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/HT_natural_convection.py
|
CarlGriffinsteed/UVM-ME144-Heat-Transfer
|
9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
|
[
"CC-BY-3.0"
] | 9
|
2019-01-24T17:43:41.000Z
|
2021-07-25T18:08:34.000Z
|
"""
Object name: HorizontalCylinder
Functions: Gr(g,beta,DT,D,nu) gives the Grashof number based on:
           gravity g, thermal expansion coefficient beta, temperature difference DT,
           length scale D, kinematic viscosity nu
           Ra(g,beta,DT,D,nu,alpha) gives the Rayleigh number, where alpha is the thermal diffusivity.
"""
import numpy as np
import scipy
import scipy.optimize
class HorizontalCylinder(object):
""" Natural convection about a horizontal cylinder
from NewLibraries import HT_natural_convection as natconv
cyl = natconv.HorizontalCylinder(correlation, Ra, Pr = 0.0)
where correlation is "Morgan" or "Churchill-Chu"
cyl = natconv.HorizontalCylinder("Morgan", Ra)
cyl = natconv.HorizontalCylinder("Churchill-Chu", Ra, Pr = xx)
"""
def __init__(self,correlation="Morgan", Ra=0.0, Pr = 0.0):
self.correlation = correlation
self.Ra = Ra
if correlation == "Morgan":
if (Ra <= 1e-2):
C=0.675
n=0.058
elif (Ra <= 1e2):
C=1.02
n=0.148
elif (Ra <= 1e4):
C=0.85
n=0.188
elif (Ra <= 1e7):
C=0.480
n=0.250
elif (Ra <= 1e12):
C=0.125
n=0.333
self.Nu = C*Ra**n
elif correlation == "Churchill-Chu":
if Pr == 0.:
print("Warning you must specify Pr for Churchill and Chu correlation")
else:
self.Nu = (0.60+(0.387*Ra**(1./6.))/(1.+(0.559/Pr)**(9./16.))**(8./27.))**2
else:
print("Warning wrong correlation name")
class VerticalEnclosure(object):
""" Natural convection about a horizontal cylinder
from NewLibraries import HT_natural_convection as natconv
cyl = natconv.HorizontalCylinder(correlation, Ra, Pr = 0.0)
where correlation is "Morgan" or "Churchill-Chu"
cyl = natconv.HorizontalCylinder("Morgan", Ra)
cyl = natconv.HorizontalCylinder("Churchill-Chu", Ra, Pr = xx)
"""
    def __init__(self, Ra, Pr, H, L, correlation="Morgan"):
        self.Ra = Ra
        self.Pr = Pr
        self.H = H
        self.L = L
        # Note: the original referenced an undefined 'correlation' name here;
        # it is now taken as a keyword argument so the branch below can run.
        if correlation == "Morgan":
if (H/L) < 2.:
if Ra*Pr/(0.2+Pr)> 1.e3:
self.Nu = 0.18*(Pr/(0.2+Pr)*Ra)**0.29
else:
print('Ra is too low for this correlation')
self.Nu = np.inf
elif H/L < 10:
if Ra < 1e10:
self.Nu = 0.22*(Pr/(0.2+Pr)*Ra)**0.28*(H/L)**(-0.25)
else:
print('Ra is too high for this correlation')
self.Nu = np.inf
elif Ra < 1e4:
print('Ra is too low for this correlation')
self.Nu = np.inf
elif Ra < 1e7:
if Pr > 0.6 and Pr < 2e4:
print('ok')
self.Nu =0.42*Ra**0.25*Pr**0.012*(H/L)**(-0.3)
else :
print('Pr is out of bounds for this correlation')
self.Nu = np.inf
elif Ra < 1e9:
if Pr > 0.6 and Pr < 20.:
self.Nu =0.46*Ra**(1./3.)
else :
print('Pr is out of bounds for this correlation')
self.Nu = np.inf
else:
print('Ra is too high, got nothing for you')
self.Nu = np.inf
def Gr(g=9.81,beta=0.0,DT=0.0,D=0.0,nu=1.0):
return (g*beta*DT*D**3)/(nu**2)
def Ra(g=9.81,beta=0.0,DT=0.0,D=0.0,nu=1.0,alpha=1.0):
return (g*beta*DT*D**3)/(nu*alpha)
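# Hedged numerical example (approximate air properties at ~300 K; values
# illustrative only): Ra(beta=1/300., DT=20., D=0.02, nu=15.9e-6, alpha=22.5e-6)
# is roughly 2.9e5, which falls in Morgan's 1e4-1e7 band above (C=0.480, n=0.250),
# so HorizontalCylinder("Morgan", Ra=2.9e5).Nu comes out at about 11.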
| 35.971154
| 102
| 0.493718
|
8a082a521a272e45316d72513cad8cc5c78a172e
| 572
|
py
|
Python
|
code_eval/utils/shapenet_pre_handle.py
|
leonardozcm/Point-Completion-Fig-AutoGenerator
|
109f5a414f51469fac82d0d23cde69efb9cf97e0
|
[
"Apache-2.0"
] | null | null | null |
code_eval/utils/shapenet_pre_handle.py
|
leonardozcm/Point-Completion-Fig-AutoGenerator
|
109f5a414f51469fac82d0d23cde69efb9cf97e0
|
[
"Apache-2.0"
] | null | null | null |
code_eval/utils/shapenet_pre_handle.py
|
leonardozcm/Point-Completion-Fig-AutoGenerator
|
109f5a414f51469fac82d0d23cde69efb9cf97e0
|
[
"Apache-2.0"
] | null | null | null |
import os
def pre_handle(path):
shapenet_list = []
for folder in os.listdir(path):
dense_path = path + "/" + folder + "/pcds/dense/"
gt_path = path + "/" + folder + "/pcds/gt/"
item_list = []
for pcd in os.listdir(dense_path):
item_list.append([dense_path + pcd,gt_path + pcd])
shapenet_list.append(item_list)
return shapenet_list
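# Directory layout assumed by pre_handle() (inferred from the path handling
# above; names are illustrative):
#   <path>/<category>/pcds/dense/<name>.pcd   partial / dense input clouds
#   <path>/<category>/pcds/gt/<name>.pcd      matching ground-truth clouds
# Each returned item is a [dense_path, gt_path] pair, grouped per category folder.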
if __name__ == "__main__":
path = "../../dataset/ShapeNetCompletion/PRNet_Voxel/test"
shape_net_list = pre_handle(path)
print(shape_net_list)
| 28.6
| 63
| 0.601399
|
c81e8d507d33b857658f520f84f36e820df38171
| 4126
|
py
|
Python
|
cryomem/common/plotproc.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | 1
|
2018-09-16T12:29:04.000Z
|
2018-09-16T12:29:04.000Z
|
cryomem/common/plotproc.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | null | null | null |
cryomem/common/plotproc.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | null | null | null |
import multiprocessing as mp
import queue
import matplotlib as mpl
mpl.use("tkagg")
import matplotlib.pyplot as plt
import time
from .plothyst import plothyst
#def _f_proc(q, **kwargs):
# """Process target function"""
# _Plotter(q, **kwargs)
class _Plotter(mp.Process):
"""Core plotting object. Controlled by queue message."""
def __init__(self, q_in, q_out, **kwargs):
"""Init with keyword arguments for plotting parameters."""
super().__init__()
self.q_in = q_in
self.q_out = q_out
self.kwargs = kwargs
def run(self):
"""Loop"""
self._setup_plot(**self.kwargs)
while True:
if self.q_in.empty():
plt.pause(0.1)
else:
msg = self.q_in.get()
if msg[0] == "exit":
plt.close(self.fig)
break
else:
self._process_msg(msg)
def _process_msg(self, msg):
"""Process the argument tuple, of which 1st element is the command."""
if msg[0] == "plot":
self._update_plot(msg[1:])
#self._update_plot2(msg[1:])
elif msg[0] == "get_wloc":
self.q_out.put(self._get_wloc())
else:
print("Invalid queue message:", msg)
def _setup_plot(self, **kwargs):
"""Create figure."""
self.xlabel = kwargs.pop("xlabel", "x")
self.ylabel = kwargs.pop("ylabel", "y")
self.title = kwargs.pop("title", "Plot")
self.plotparams = kwargs
self.plotstyle = "o-"
mpl.rcParams["toolbar"] = "None" # disable toolbar
self.fig = plt.figure()
self._set_wloc() # place window
self._set_title(self.title) # set title
self.ax = self.fig.add_subplot(111)
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
plt.pause(0.1)
def _update_plot(self, msg):
x, y = msg[0], msg[1]
self.ax.cla()
self.line = self.ax.plot(x, y, self.plotstyle)
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
plt.tight_layout()
plt.pause(0.1)
def _update_plot2(self, msg):
x, y = msg[0], msg[1]
self.ax.cla()
#self.line = self.ax.plot(x, y, self.plotstyle)
self.line = plothyst(x, y, self.plotstyle)
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
plt.tight_layout()
plt.pause(0.1)
def _set_title(self, title):
cfm = plt.get_current_fig_manager()
cfm.set_window_title(title)
def _set_wloc(self):
cfm = plt.get_current_fig_manager()
if "wx" in self.plotparams and "wy" in self.plotparams:
cfm.window.geometry("+{}+{}".format(self.plotparams["wx"], self.plotparams["wy"]))
def _get_wloc(self):
cfm = plt.get_current_fig_manager()
return tuple(map(int, cfm.window.geometry().split("+")[1:]))
class PlotProc:
"""Spawn and manage plotting process."""
def __init__(self, **kwargs):
"""Init with keyword arguments for plotting parameters."""
self.q_in = mp.Queue()
self.q_out = mp.Queue()
#self.proc = mp.Process(target=_Plotter, args=(self.q,), kwargs=kwargs)
self.proc = _Plotter(self.q_out, self.q_in, **kwargs)
#self.proc = _Plotter()
self.proc.start()
def plot(self, x, y):
"""Update plot with arguments x, y."""
self.q_out.put(("plot", x, y))
def close(self):
"""Finish plotting thread."""
self.q_out.put(("exit",))
self.proc.join()
def get_wloc(self):
self.q_out.put(("get_wloc",))
return self.q_in.get()
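# Queue protocol between PlotProc and _Plotter, summarised from the code above:
#   ("plot", x, y)  -> redraw the figure with the new data
#   ("get_wloc",)   -> reply on the return queue with the window (x, y) position
#   ("exit",)       -> close the figure and let the plotting process terminate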
def demo():
import numpy as np
import time
x = np.arange(10)
y = np.arange(10)
pp = PlotProc()
for k, _ in enumerate(x):
print(k, x[:(k+1)], y[:(k+1)])
pp.plot(x[:(k+1)], y[:(k+1)])
time.sleep(1)
pp.close()
print("demo() finishing.")
if __name__ == "__main__":
demo()
| 30.338235
| 94
| 0.554532
|
00e4870359998193f0559d67e24c0df288e0876c
| 975
|
py
|
Python
|
examples/neko.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
examples/neko.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
examples/neko.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
from anekos import NekosLifeClient, SFWImageTags
import discord
from discord.ext import commands
neko = NekosLifeClient()
client = commands.Bot(command_prefix=".")
"""
<'solog', 'smug', 'feet', 'smallboobs', 'lewdkemo', 'woof', 'gasm', 'solo', '8ball', 'goose', 'cuddle', 'avatar', 'cum', 'slap', 'les', 'v3', 'erokemo', 'bj', 'pwankg', 'nekoapi_v3.1', 'ero', 'hololewd', 'pat', 'gecg', 'holo', 'poke', 'feed', 'fox_girl', 'tits', 'nsfw_neko_gif', 'eroyuri', 'holoero', 'pussy', 'Random_hentai_gif', 'lizard', 'yuri', 'keta', 'neko', 'hentai', 'feetg', 'eron', 'erok', 'baka', 'kemonomimi', 'hug', 'cum_jpg', 'nsfw_avatar', 'erofeet', 'meow', 'kiss', 'wallpaper', 'tickle', 'blowjob', 'spank', 'kuni', 'classic', 'waifu', 'femdom', 'boobs', 'trap', 'lewd', 'pussy_jpg', 'anal', 'futanari', 'ngif', 'lewdk'>"
"""
@client.command()
async def nekos(ctx):
result = await neko.image(SFWImageTags.CUDDLE)
await ctx.send(result.url)
client.run('token')
| 54.166667
| 640
| 0.624615
|
898014f906d180a1e1f52b8cbe5b548feec2fc70
| 5092
|
py
|
Python
|
tests/integration/lambdas/lambda_integration.py
|
lsdev693/localstack
|
f384f5837caad840201258a3d94e19d4bf68571f
|
[
"Apache-2.0"
] | 1
|
2020-02-21T04:43:55.000Z
|
2020-02-21T04:43:55.000Z
|
tests/integration/lambdas/lambda_integration.py
|
lsdev693/localstack
|
f384f5837caad840201258a3d94e19d4bf68571f
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/lambdas/lambda_integration.py
|
lsdev693/localstack
|
f384f5837caad840201258a3d94e19d4bf68571f
|
[
"Apache-2.0"
] | 1
|
2020-04-17T15:02:32.000Z
|
2020-04-17T15:02:32.000Z
|
import json
import base64
import logging
import boto3.dynamodb.types
from io import BytesIO
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str, to_bytes
TEST_BUCKET_NAME = 'test-bucket'
KINESIS_STREAM_NAME = 'test_stream_1'
MSG_BODY_RAISE_ERROR_FLAG = 'raise_error'
MSG_BODY_MESSAGE_TARGET = 'message_target'
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
# Subclass of boto's TypeDeserializer for DynamoDB
# to adjust for DynamoDB Stream format.
class TypeDeserializer(boto3.dynamodb.types.TypeDeserializer):
def _deserialize_n(self, value):
return float(value)
def _deserialize_b(self, value):
return value # already in Base64
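# Hedged example of the adjusted deserializer above (illustrative input):
#   TypeDeserializer().deserialize({'M': {'id': {'S': 'abc'}, 'count': {'N': '3'}}})
# returns {'id': 'abc', 'count': 3.0} -- numbers come back as plain floats and
# binary attributes stay as their Base64 strings rather than boto3's Binary type.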
def handler(event, context):
""" Generic event forwarder Lambda. """
# print test messages (to test CloudWatch Logs integration)
LOGGER.info('Lambda log message - logging module')
print('Lambda log message - print function')
if MSG_BODY_RAISE_ERROR_FLAG in event:
raise Exception('Test exception (this is intentional)')
if 'httpMethod' in event:
# looks like this is a call from an AWS_PROXY API Gateway
try:
body = json.loads(event['body'])
except Exception:
body = {}
body['pathParameters'] = event.get('pathParameters')
body['requestContext'] = event.get('requestContext')
body['queryStringParameters'] = event.get('queryStringParameters')
body['httpMethod'] = event.get('httpMethod')
status_code = body.get('return_status_code', 200)
headers = body.get('return_headers', {})
body = body.get('return_raw_body') or body
return {
'body': body,
'statusCode': status_code,
'headers': headers
}
if 'Records' not in event:
return {
'event': event,
'context': {
'invoked_function_arn': context.invoked_function_arn,
'function_version': context.function_version,
'function_name': context.function_name
}
}
raw_event_messages = []
for record in event['Records']:
# Deserialize into Python dictionary and extract the
# "NewImage" (the new version of the full ddb document)
ddb_new_image = deserialize_event(record)
if MSG_BODY_RAISE_ERROR_FLAG in ddb_new_image.get('data', {}):
raise Exception('Test exception (this is intentional)')
# Place the raw event message document into the Kinesis message format
kinesis_record = {
'PartitionKey': 'key123',
'Data': json.dumps(ddb_new_image)
}
if MSG_BODY_MESSAGE_TARGET in ddb_new_image.get('data', {}):
forwarding_target = ddb_new_image['data'][MSG_BODY_MESSAGE_TARGET]
target_name = forwarding_target.split(':')[-1]
if forwarding_target.startswith('kinesis:'):
ddb_new_image['data'][MSG_BODY_MESSAGE_TARGET] = 's3:/test_chain_result'
kinesis_record['Data'] = json.dumps(ddb_new_image['data'])
forward_event_to_target_stream(kinesis_record, target_name)
elif forwarding_target.startswith('s3:'):
s3_client = aws_stack.connect_to_service('s3')
test_data = to_bytes(json.dumps({'test_data': ddb_new_image['data']['test_data']}))
s3_client.upload_fileobj(BytesIO(test_data), TEST_BUCKET_NAME, target_name)
else:
raw_event_messages.append(kinesis_record)
# Forward messages to Kinesis
forward_events(raw_event_messages)
def deserialize_event(event):
# Deserialize into Python dictionary and extract the "NewImage" (the new version of the full ddb document)
ddb = event.get('dynamodb')
if ddb:
result = {
'__action_type': event.get('eventName'),
}
ddb_deserializer = TypeDeserializer()
if ddb.get('OldImage'):
result['old_image'] = ddb_deserializer.deserialize({'M': ddb.get('OldImage')})
if ddb.get('NewImage'):
result['new_image'] = ddb_deserializer.deserialize({'M': ddb.get('NewImage')})
return result
kinesis = event.get('kinesis')
if kinesis:
assert kinesis['sequenceNumber']
kinesis['data'] = json.loads(to_str(base64.b64decode(kinesis['data'])))
return kinesis
sqs = event.get('sqs')
if sqs:
result = {'data': event['body']}
return result
sns = event.get('Sns')
if sns:
result = {'data': sns['Message']}
return result
def forward_events(records):
if not records:
return
kinesis = aws_stack.connect_to_service('kinesis')
kinesis.put_records(StreamName=KINESIS_STREAM_NAME, Records=records)
def forward_event_to_target_stream(record, stream_name):
kinesis = aws_stack.connect_to_service('kinesis')
kinesis.put_record(StreamName=stream_name, Data=record['Data'], PartitionKey=record['PartitionKey'])
| 36.113475
| 110
| 0.654556
|