code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# This file is part of https://github.com/26fe/jsonstat.py
# Copyright (C) 2016-2021 gf <gf@26fe.com>
# See LICENSE file

# Single source of truth for the package version (read by setup/packaging).
__version__ = '0.2.0'
| 26fe/jsonstat.py | jsonstat/version.py | Python | lgpl-3.0 | 168 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for sparse model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.utils import t2t_model
import tensorflow.compat.v1 as tf
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
from tensorflow.contrib.model_pruning.python import pruning as magnitude_pruning
def pruning_hparams(hparams, use_tpu, random):  # pylint: disable=unused-argument
  """Helper to get hparams for pruning library.

  Translates the model's t2t hparams into the `contrib_training.HParams`
  object expected by `tensorflow.contrib.model_pruning`.

  Args:
    hparams: model hparams; pruning-related keys (begin/end_pruning_step,
      target_sparsity, etc.) are read via `hparams.get(...)`.
    use_tpu: whether training runs on TPU (forwarded to the pruning lib).
    random: whether random (rather than magnitude) pruning was requested;
      currently unused — see the commented-out `random=random` below.

  Returns:
    A `contrib_training.HParams` instance for `magnitude_pruning.Pruning`.
  """
  weight_sparsity_map = [""]
  # NOTE(review): this branch also fires when embedding_sparsity == 0.0,
  # installing a per-variable sparsity of 0 for the shared embedding and
  # logging "0.0% sparsity" — confirm that is intended.
  if hparams.get("embedding_sparsity") >= 0.0:
    # Per-variable override: prune only the shared embedding matrix of the
    # transformer to the requested sparsity (name is model-specific).
    weight_sparsity_map = [
        "transformer/symbol_modality_33288_512/shared/:{}"
        .format(hparams.get("embedding_sparsity"))
    ]
    tf.logging.info(
        "Pruning embedding matrix to {}% sparsity"
        .format(hparams.get("embedding_sparsity") * 100))
  hparams = contrib_training.HParams(
      name="model_pruning",
      begin_pruning_step=hparams.get("begin_pruning_step"),
      end_pruning_step=hparams.get("end_pruning_step"),
      weight_sparsity_map=weight_sparsity_map,
      threshold_decay=hparams.get("threshold_decay"),
      pruning_frequency=hparams.get("pruning_frequency"),
      nbins=hparams.get("nbins"),
      block_height=1,
      block_width=1,
      block_pooling_function="AVG",
      initial_sparsity=0.0,  # always start at sparsity 0
      target_sparsity=hparams.get("target_sparsity"),
      sparsity_function_begin_step=hparams.get("begin_pruning_step"),
      sparsity_function_end_step=hparams.get("end_pruning_step"),
      sparsity_function_exponent=hparams.get("sparsity_function_exponent"),
      use_tpu=use_tpu)
  # TODO(tgale): Fix the need to keep this commented out.
  # random pruning currently does not work.
  # random=random)
  return hparams
def check_global_sparsity():
  """Add a summary for the global weight sparsity.

  Reads every pruning mask registered with the model_pruning library and
  emits a scalar summary "global_weight_sparsity" equal to the fraction of
  masked-out (zero) weights across all pruned layers combined.
  """
  weight_masks = magnitude_pruning.get_masks()
  weights_per_layer = []
  nonzero_per_layer = []
  for mask in weight_masks:
    # Masks are 0/1 tensors, so the sum counts surviving (nonzero) weights.
    nonzero_per_layer.append(tf.reduce_sum(mask))
    weights_per_layer.append(tf.size(mask))
  total_nonzero = tf.add_n(nonzero_per_layer)
  total_weights = tf.add_n(weights_per_layer)
  # Sparsity = 1 - density; cast to float for the division.
  sparsity = (1.0 - (tf.cast(total_nonzero, tf.float32) /
                     tf.cast(total_weights, tf.float32)))
  tf.summary.scalar("global_weight_sparsity", sparsity)
class SparseModel(t2t_model.T2TModel):
  """T2T model with weight sparsity.

  Extends `T2TModel` with checkpoint warm-start helpers that split a
  checkpoint's variables into pruning masks vs. everything else, and a
  TRAIN-mode estimator spec that interleaves mask updates with training.
  """

  def initialize_masks_from_ckpt(self, checkpoint):
    """Warm-start only the pruning-mask variables from `checkpoint`.

    No-op if `model_dir` already contains a checkpoint (resuming training
    should not clobber the masks already being trained).
    """
    model_dir = self._hparams.get("model_dir", None)
    already_has_ckpt = (
        model_dir and tf.train.latest_checkpoint(model_dir) is not None)
    if already_has_ckpt:
      tf.logging.info("Checkpoint exists in model_dir, not loading variables.")
      return
    # Create a list of mask variables to load
    reader = tf.train.NewCheckpointReader(checkpoint)
    mask_names = reader.get_variable_to_shape_map().keys()
    mask_names = [x for x in mask_names if x.endswith("mask")]
    variable_map = {}
    for var in tf.global_variables():
      # Strip the ":0" output suffix to match checkpoint naming.
      var_name = var.name.split(":")[0]
      if var_name in mask_names:
        tf.logging.info("Loading mask variable from checkpoint: %s", var_name)
        variable_map[var_name] = var
      elif "mask" in var_name:
        tf.logging.info(
            "Cannot find mask variable in checkpoint, skipping: %s", var_name)
    tf.train.init_from_checkpoint(checkpoint, variable_map)

  def initialize_non_masks_from_ckpt(self, checkpoint):
    """Warm-start all variables except pruning masks from `checkpoint`.

    Mirror image of `initialize_masks_from_ckpt`; same model_dir guard.
    """
    model_dir = self._hparams.get("model_dir", None)
    already_has_ckpt = (
        model_dir and tf.train.latest_checkpoint(model_dir) is not None)
    if already_has_ckpt:
      tf.logging.info("Checkpoint exists in model_dir, not loading variables.")
      return
    # Create a list of non-mask variables to load
    reader = tf.train.NewCheckpointReader(checkpoint)
    non_mask_names = reader.get_variable_to_shape_map().keys()
    non_mask_names = [x for x in non_mask_names if not x.endswith("mask")]
    variable_map = {}
    for var in tf.global_variables():
      var_name = var.name.split(":")[0]
      if var_name in non_mask_names:
        tf.logging.info(
            "Loading non-mask variable from checkpoint: %s", var_name)
        variable_map[var_name] = var
      elif "mask" not in var_name:
        tf.logging.info(
            "Cannot find non-mask variable in checkpoint, skipping: %s",
            var_name)
    tf.train.init_from_checkpoint(checkpoint, variable_map)

  def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
    """Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode."""
    train_op = self.optimize(
        loss,
        num_async_replicas=num_async_replicas,
        use_tpu=use_tpu)
    sparsity_technique = self._hparams.get("sparsity_technique")
    if "pruning" in sparsity_technique:
      if not self._hparams.load_masks_from:
        # If we are loading trained masks, don't add the mask update
        # step to the training process and keep the masks static
        with tf.control_dependencies([train_op]):
          mp_hparams = pruning_hparams(
              self._hparams,
              use_tpu,
              sparsity_technique == "random_pruning")
          p = magnitude_pruning.Pruning(
              mp_hparams,
              global_step=tf.train.get_global_step())
          mask_update_op = p.conditional_mask_update_op()
          # The mask update op depends on train_op (control dep above), so
          # returning it alone still runs the optimizer step first.
          train_op = mask_update_op
      check_global_sparsity()
    if use_tpu:
      # TPU path: variable initialization must happen through a scaffold_fn.
      if self._hparams.warm_start_from:
        def scaffold_fn():
          self.initialize_from_ckpt(
              self._hparams.warm_start_from)
          return tf.train.Scaffold()
      elif self._hparams.load_masks_from and self._hparams.load_weights_from:
        def scaffold_fn():
          self.initialize_masks_from_ckpt(
              self._hparams.load_masks_from)
          self.initialize_non_masks_from_ckpt(
              self._hparams.load_weights_from)
          return tf.train.Scaffold()
      elif self._hparams.load_masks_from:
        def scaffold_fn():
          self.initialize_masks_from_ckpt(
              self._hparams.load_masks_from)
          return tf.train.Scaffold()
      else:
        scaffold_fn = None
      # Note: important to call this before remove_summaries()
      if self.hparams.tpu_enable_host_call:
        host_call = t2t_model.create_host_call(self.hparams.model_dir)
      else:
        host_call = None
      t2t_model.remove_summaries()
      return contrib_tpu.TPUEstimatorSpec(
          tf.estimator.ModeKeys.TRAIN,
          loss=loss,
          train_op=train_op,
          host_call=host_call,
          scaffold_fn=scaffold_fn)
    else:
      # CPU/GPU path: initialization can run eagerly here.
      if self._hparams.warm_start_from:
        self.initialize_from_ckpt(
            self._hparams.warm_start_from)
      elif self._hparams.load_masks_from:
        self.initialize_masks_from_ckpt(
            self._hparams.load_masks_from)
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.TRAIN,
          loss=loss,
          train_op=train_op)
| google-research/google-research | state_of_sparsity/sparse_transformer/models/sparse_model.py | Python | apache-2.0 | 7,729 |
# -*- encoding: utf-8 -*-
"""Implements Operating System UI."""
from robottelo.constants import FILTER
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class OperatingSys(Base):
    """Manipulates Foreman's operating system from UI."""

    def navigate_to_entity(self):
        """Navigate to OS entity page"""
        Navigator(self.browser).go_to_operating_systems()

    def _search_locator(self):
        """Specify locator for OS entity search procedure"""
        return locators['operatingsys.operatingsys_name']

    def _configure_os(self, archs, ptables, mediums, select,
                      minor_version=None, description=None, os_family=None,
                      template=None, arch_list=None, ptable_list=None,
                      medium_list=None):
        """Configures the operating system details.

        Fills the optional scalar fields (minor version, description,
        family), then associates architectures, partition tables and media
        through ``configure_entity`` and finally picks the provision
        template.  Shared by :meth:`create` and :meth:`update`.
        """
        tab_primary_locator = tab_locators['tab_primary']
        tab_ptable_locator = tab_locators['operatingsys.tab_ptable']
        tab_medium_locator = tab_locators['operatingsys.tab_medium']
        if minor_version:
            if self.wait_until_element(
                    locators['operatingsys.minor_version']):
                self.field_update('operatingsys.minor_version', minor_version)
        if description:
            if self.wait_until_element(
                    locators['operatingsys.description']):
                self.field_update('operatingsys.description', description)
        if os_family:
            self.select(locators['operatingsys.family'], os_family)
        # Each entity type is configured only when either existing entities
        # or a replacement list was supplied by the caller.
        if archs or arch_list:
            self.configure_entity(
                archs,
                FILTER['os_arch'],
                tab_locator=tab_primary_locator,
                new_entity_list=arch_list,
                entity_select=select
            )
        if ptables or ptable_list:
            self.configure_entity(
                ptables,
                FILTER['os_ptable'],
                tab_locator=tab_ptable_locator,
                new_entity_list=ptable_list,
                entity_select=select
            )
        if mediums or medium_list:
            self.configure_entity(
                mediums,
                FILTER['os_medium'],
                tab_locator=tab_medium_locator,
                new_entity_list=medium_list,
                entity_select=select
            )
        if template:
            self.click(tab_locators['operatingsys.tab_templates'])
            self.select(locators['operatingsys.template'], template)

    def create(self, name, major_version=None,
               minor_version=None, description=None, os_family=None,
               archs=None, ptables=None, mediums=None, select=True,
               template=None):
        """Create operating system from UI.

        :raises UIError: if the "new OS" control cannot be found or the
            major version field is unavailable (major version is mandatory).
        """
        new_os = self.wait_until_element(locators['operatingsys.new'])
        if new_os:
            new_os.click()
            os_name_locator = locators['operatingsys.name']
            os_major_locator = locators['operatingsys.major_version']
            if self.wait_until_element(os_name_locator):
                self.find_element(os_name_locator).send_keys(name)
                if self.wait_until_element(os_major_locator):
                    self.find_element(os_major_locator).send_keys(major_version)
                    self._configure_os(
                        archs,
                        ptables,
                        mediums,
                        select,
                        minor_version,
                        description,
                        os_family,
                        template,
                        arch_list=None,
                        ptable_list=None,
                        medium_list=None
                    )
                    self.click(common_locators['submit'])
                else:
                    raise UIError(u'Could not create OS without major_version')
        else:
            raise UIError(
                u'Could not create new operating system "{0}"'.format(name)
            )

    def delete(self, os_name, really=True):
        """Delete operating system from UI."""
        self.delete_entity(
            os_name,
            really,
            locators['operatingsys.delete']
        )

    def update(self, os_name, new_name=None,
               major_version=None, minor_version=None,
               description=None, os_family=None, archs=None,
               ptables=None, mediums=None, new_archs=None,
               new_ptables=None, new_mediums=None, select=False,
               template=None):
        """Update all entities(arch, Partition table, medium) of OS from UI."""
        element = self.search(os_name)
        self.click(element)
        if new_name:
            if self.wait_until_element(locators['operatingsys.name']):
                self.field_update('operatingsys.name', new_name)
        if major_version:
            if self.wait_until_element(
                    locators['operatingsys.major_version']):
                self.field_update(
                    'operatingsys.major_version', major_version)
        self._configure_os(
            archs,
            ptables,
            mediums,
            select,
            minor_version,
            description,
            os_family,
            template,
            arch_list=new_archs,
            ptable_list=new_ptables,
            medium_list=new_mediums
        )
        self.click(common_locators['submit'])

    def set_os_parameter(self, os_name, param_name, param_value):
        """Add new OS parameter."""
        element = self.search(os_name)
        self.click(element)
        self.set_parameter(param_name, param_value)

    def remove_os_parameter(self, os_name, param_name):
        """Remove selected OS parameter."""
        element = self.search(os_name)
        self.click(element)
        self.remove_parameter(param_name)

    def get_selected_entities(self):
        """Function to get selected elements (either it is a check-box or
        selection list).
        """
        selected_element = self.wait_until_element(
            common_locators['selected_entity'])
        checked_element = self.find_element(common_locators['checked_entity'])
        # Prefer the selection-list value; fall back to the checked box.
        if selected_element:
            entity_value = selected_element.text
        else:
            entity_value = checked_element.text
        return entity_value

    def get_os_entities(self, os_name, entity_name=None):
        """Assert OS name, minor, major_version, os_family, template, media,
        and partition table to validate results.

        Returns a dict with keys name/major/minor/os_family always filled and
        exactly one of ptable/medium/template filled, depending on
        ``entity_name``.  Raises UIError when the OS page cannot be opened.
        """
        name_loc = locators['operatingsys.name']
        major_ver_loc = locators['operatingsys.major_version']
        minor_ver_loc = locators['operatingsys.minor_version']
        os_family_loc = locators['operatingsys.fetch_family']
        element = self.search(os_name)
        self.click(element)
        if self.wait_until_element(locators['operatingsys.name']):
            result = dict([('name', None), ('major', None),
                           ('minor', None), ('os_family', None),
                           ('ptable', None), ('template', None),
                           ('medium', None)])
            result['name'] = self.find_element(
                name_loc).get_attribute('value')
            result['major'] = self.find_element(
                major_ver_loc).get_attribute('value')
            result['minor'] = self.find_element(
                minor_ver_loc).get_attribute('value')
            result['os_family'] = self.find_element(os_family_loc).text
            if entity_name == 'ptable':
                self.click(tab_locators['operatingsys.tab_ptable'])
                result['ptable'] = self.get_selected_entities()
            elif entity_name == 'medium':
                self.click(tab_locators['operatingsys.tab_medium'])
                result['medium'] = self.get_selected_entities()
            elif entity_name == 'template':
                self.click(tab_locators['operatingsys.tab_templates'])
                result['template'] = self.find_element(
                    locators['operatingsys.fetch_template']).text
            return result
        else:
            raise UIError(
                u'Could not find the OS name "{0}"'.format(os_name)
            )
| danuzclaudes/robottelo | robottelo/ui/operatingsys.py | Python | gpl-3.0 | 8,335 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017,2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import importlib
import logging
import sys
from pathlib import Path
import jsonschema
import snapcraft.yaml_utils.errors
from snapcraft import plugins
from snapcraft.internal import errors
from snapcraft.project import Project
logger = logging.getLogger(__name__)
def load_plugin(
    plugin_name: str,
    part_name: str,
    project: Project,
    properties,
    part_schema,
    definitions_schema,
) -> plugins.v1.PluginV1:
    """Instantiate the plugin for a part.

    Local plugins (from the project's local plugin dir) take precedence over
    built-in ones.  v2 plugins are constructed with keyword arguments and do
    not take the project; v1 plugins additionally get their declared
    pull/build properties validated against the merged schema.
    """
    local_plugins_dir = project._get_local_plugins_dir()
    # NOTE(review): if _get_local_plugins_dir() ever returns None,
    # plugin_class is referenced below before assignment (NameError) —
    # confirm the call can't return None in practice.
    if local_plugins_dir is not None:
        plugin_class = _get_local_plugin_class(
            plugin_name=plugin_name, local_plugins_dir=local_plugins_dir
        )
    if plugin_class is None:
        plugin_class = plugins.get_plugin_for_base(
            plugin_name, build_base=project._get_build_base()
        )
    if issubclass(plugin_class, plugins.v2.PluginV2):
        plugin_schema = plugin_class.get_schema()
        options = _make_options(
            part_name, part_schema, definitions_schema, properties, plugin_schema
        )
        plugin = plugin_class(part_name=part_name, options=options)
    else:
        plugin_schema = plugin_class.schema()
        _validate_pull_and_build_properties(
            plugin_name, plugin_class, part_schema, definitions_schema
        )
        options = _make_options(
            part_name, part_schema, definitions_schema, properties, plugin_schema
        )
        plugin = plugin_class(part_name, options, project)
        if project.is_cross_compiling:
            logger.debug(
                "Setting {!r} as the compilation target for {!r}".format(
                    project.deb_arch, plugin_name
                )
            )
            plugin.enable_cross_compilation()
    return plugin
def _load_compat_x_prefix(plugin_name: str, module_name: str, local_plugin_dir: str):
compat_path = Path(local_plugin_dir, f"x-{plugin_name}.py")
if not compat_path.exists():
return None
preferred_name = f"{module_name}.py"
logger.warning(
f"Legacy plugin name detected, please rename the plugin's file name {compat_path.name!r} to {preferred_name!r}."
)
spec = importlib.util.spec_from_file_location(plugin_name, compat_path)
if spec.loader is None:
return None
# Prevent mypy type complaints by asserting type.
assert isinstance(spec.loader, importlib.abc.Loader)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def _load_local(plugin_name: str, local_plugin_dir: str):
    """Import a local plugin module, preferring the legacy x-prefixed file."""
    module_name = plugin_name.replace("-", "_")

    legacy = _load_compat_x_prefix(plugin_name, module_name, local_plugin_dir)
    if legacy is not None:
        return legacy

    # Temporarily put the local plugin directory first on sys.path so the
    # module resolves there, restoring the path even on import failure.
    sys.path.insert(0, local_plugin_dir)
    logger.debug(f"Loading plugin module {module_name!r} with sys.path {sys.path!r}")
    try:
        return importlib.import_module(module_name)
    finally:
        sys.path.pop(0)
def _get_local_plugin_class(*, plugin_name: str, local_plugins_dir: str):
    """Return the plugin class defined by a local plugin module, if any.

    Returns None (via the suppressed ImportError) when no local module
    exists; raises PluginError when a module loads but contains no usable
    plugin class.  v2 plugins must be named ``PluginImpl``; for v1 the first
    PluginV1 subclass not originating from snapcraft itself is used.
    """
    # If _load_local raises ImportError, the whole body is skipped and the
    # function implicitly returns None — the caller then falls back to the
    # built-in plugins.
    with contextlib.suppress(ImportError):
        module = _load_local(plugin_name, local_plugins_dir)
        logger.info(f"Loaded local plugin for {plugin_name}")

        # v2 requires plugin implementation to be named "PluginImpl".
        if hasattr(module, "PluginImpl") and issubclass(
            module.PluginImpl, plugins.v2.PluginV2
        ):
            return module.PluginImpl

        for attr in vars(module).values():
            if not isinstance(attr, type):
                continue

            if not issubclass(attr, plugins.v1.PluginV1):
                continue

            if not hasattr(attr, "__module__"):
                continue

            logger.debug(
                f"Plugin attribute {attr!r} has __module__: {attr.__module__!r}"
            )
            # Skip base classes re-exported from snapcraft itself; only a
            # class actually defined by the local module qualifies.
            if attr.__module__.startswith("snapcraft.plugins"):
                continue

            return attr
        else:
            # for/else: no suitable class was found in the module.
            raise errors.PluginError(f"unknown plugin: {plugin_name!r}")
def _validate_pull_and_build_properties(
    plugin_name, plugin, part_schema, definitions_schema
):
    """Check that the plugin's declared pull/build properties exist in the
    merged part+plugin schema, raising the step-specific error otherwise."""
    merged_properties = _merged_part_and_plugin_schemas(
        part_schema, definitions_schema, plugin.schema()
    )["properties"]

    # Pull step first: any declared property missing from the schema is fatal.
    unknown = _validate_step_properties(
        plugin.get_pull_properties(), merged_properties
    )
    if unknown:
        raise errors.InvalidPullPropertiesError(plugin_name, list(unknown))

    # Then the build step, with its own error type.
    unknown = _validate_step_properties(
        plugin.get_build_properties(), merged_properties
    )
    if unknown:
        raise errors.InvalidBuildPropertiesError(plugin_name, list(unknown))
def _validate_step_properties(step_properties, schema_properties):
invalid_properties = set()
for step_property in step_properties:
if step_property not in schema_properties:
invalid_properties.add(step_property)
return invalid_properties
def _make_options(
    part_name, part_schema, definitions_schema, properties, plugin_schema
):
    """Validate *properties* against the merged schema and build the options
    object handed to the plugin constructor.

    Raises PluginError (wrapping the jsonschema validation message) when the
    part's properties do not satisfy the schema.
    """
    # Make copies as these dictionaries are tampered with
    part_schema = part_schema.copy()
    properties = properties.copy()

    plugin_schema = _merged_part_and_plugin_schemas(
        part_schema, definitions_schema, plugin_schema
    )

    try:
        jsonschema.validate(properties, plugin_schema)
    except jsonschema.ValidationError as e:
        error = snapcraft.yaml_utils.errors.YamlValidationError.from_validation_error(e)
        raise errors.PluginError(
            "properties failed to load for {}: {}".format(part_name, error.message)
        )

    return _populate_options(properties, plugin_schema)
def _merged_part_and_plugin_schemas(part_schema, definitions_schema, plugin_schema):
plugin_schema = plugin_schema.copy()
if "properties" not in plugin_schema:
plugin_schema["properties"] = {}
if "definitions" not in plugin_schema:
plugin_schema["definitions"] = {}
# The part schema takes precedence over the plugin's schema.
plugin_schema["properties"].update(part_schema)
plugin_schema["definitions"].update(definitions_schema)
return plugin_schema
def _populate_options(properties, schema):
class Options:
pass
options = Options()
schema_properties = schema.get("properties", {})
for key in schema_properties:
attr_name = key.replace("-", "_")
default_value = schema_properties[key].get("default")
attr_value = properties.get(key, default_value)
setattr(options, attr_name, attr_value)
return options
| chipaca/snapcraft | snapcraft/internal/pluginhandler/_plugin_loader.py | Python | gpl-3.0 | 7,482 |
# -*- coding: utf-8 -*-
from SPARQLWrapper import SPARQLWrapper, JSON
import ast
def queryDBnary(server, lemma, pos, language):
    """Query a DBnary SPARQL endpoint for the number of senses of *lemma*.

    *pos* is currently unused (the part-of-speech triple is commented out
    of the query below).  Returns the sense count reported by the endpoint
    (as the string value of the binding), or 0 when no binding is returned.

    BUG FIX: the original did ``return print(...)``, which always returned
    None — the count was printed but lost to the caller.
    """
    sparql = SPARQLWrapper(server)
    sparql.setQuery("""
    SELECT COUNT(distinct ?s) as ?numssenses WHERE {
    ?e a lemon:LexicalEntry;
    dcterms:language lexvo:"""+language+""";
    lemon:canonicalForm ?cf;
    lemon:sense ?s.
    ?cf lemon:writtenRep ?wf.
    FILTER(regex(?wf,"^"""+lemma+"""$","i"))
    }
    """)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    print(results)
    for result in results["results"]["bindings"]:
        numsenses = result["numssenses"]["value"]
        print(numsenses)
        return numsenses
    return 0
    # lexinfo:partOfSpeech lexinfo:"""+pos+"""; (disabled POS filter)
def queryDBnaryFull(server, language_code_iso, language_code_dbnary):
    """Dump a (written form, POS, sense count) dictionary for one language.

    Reads a POS-correspondence table from ``corresp.<iso>`` (a Python
    literal dict on its second line), pages through the DBnary graph 10000
    rows at a time, and writes tab-separated lines to
    ``dictionnary.<iso>``.  Always returns 0.
    """
    # NOTE(review): fcorresp is never closed, and the first readline() result
    # is discarded (header line?) — confirm the corresp file format.
    fcorresp=open("corresp."+language_code_iso,"r")
    line=fcorresp.readline()
    line=fcorresp.readline().strip()
    dict_corresp=ast.literal_eval(line)
    fdict=open("dictionnary."+language_code_iso,"w")
    sparql = SPARQLWrapper(server)
    # First page: written form, POS and sense count, grouped per (form, POS).
    sparql.setQuery("""
    PREFIX lexvo: <http://lexvo.org/id/iso639-3/>
    SELECT ?wf, ?pos, ?numsenses WHERE {
    SELECT ?wf, ?pos, COUNT(?s) as ?numsenses FROM <http://kaiko.getalp.org/dbnary/"""+language_code_dbnary+"""> WHERE {
    ?e a lemon:LexicalEntry;
    dbnary:partOfSpeech ?pos;
    lemon:canonicalForm ?cf.
    ?cf lemon:writtenRep ?wf.
    ?e lemon:sense ?s.
    }
    GROUP BY ?wf ?pos ORDER BY ?wf
    }
    LIMIT 10000
    OFFSET 0
    """)
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    #print(results)
    limite=0
    # NOTE(review): the guard is "> 1", so a final page containing exactly
    # one binding is never written out — "> 0" looks intended; confirm.
    while len(results["results"]["bindings"]) > 1:
        for result in results["results"]["bindings"]:
            # Map the DBnary POS URI to the project's tag set, defaulting
            # to "<unknown>" for unmapped values.
            if result["pos"]["value"] in dict_corresp:
                rep_pos=dict_corresp[result["pos"]["value"]]
            else:
                rep_pos="<unknown>"
            fdict.write(result["wf"]["value"] + "\t" + rep_pos + "\t"+ result["numsenses"]["value"]+"\n")
        limite=limite+10000
        # Next page: same query with an advanced OFFSET.
        sparql.setQuery("""
        PREFIX lexvo: <http://lexvo.org/id/iso639-3/>
        SELECT ?wf, ?pos, ?numsenses WHERE {
        SELECT ?wf, ?pos, COUNT(?s) as ?numsenses FROM <http://kaiko.getalp.org/dbnary/"""+language_code_dbnary+"""> WHERE {
        ?e a lemon:LexicalEntry;
        dbnary:partOfSpeech ?pos;
        lemon:canonicalForm ?cf.
        ?cf lemon:writtenRep ?wf.
        ?e lemon:sense ?s.
        }
        GROUP BY ?wf ?pos ORDER BY ?wf
        }
        LIMIT 10000
        OFFSET """+str(limite)+"""
        """)
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
    fdict.close()
    # (Large block of superseded, commented-out experiments with fixed
    # OFFSET values removed for readability — see VCS history.)
    return 0
    #lexinfo:partOfSpeech lexinfo:"""+pos+""";
#lexinfo:partOfSpeech lexinfo:"""+pos+""";
# Reference copy of the single-word query this module generalizes
# (kept as a no-op module-level string):
"""
SELECT COUNT(distinct ?s) as ?numssenses WHERE {
?e a lemon:LexicalEntry;
dcterms:language lexvo:fra;
lexinfo:partOfSpeech lexinfo:noun;
lemon:canonicalForm ?cf;
lemon:sense ?s.
?cf lemon:writtenRep ?wf.
FILTER(regex(?wf,"^chat$","i"))
}
"""

if __name__ == "__main__":
    # Parallel lists: DBnary graph codes (ISO-639-3) and short codes used
    # for the local output file names.
    langex=["eng","fra","rus","spa","deu"]
    langsh=["en","fr","ru","es","de"]
    # NOTE(review): the full 5-language lists above are immediately
    # overwritten with this 3-language subset — presumably a temporary
    # restriction; confirm which set is intended.
    langex=["eng","fra","spa"]
    langsh=["en","fr","es"]
    for i in range(0,len(langsh)):
        print("Extraction of the dictionnary for language "+langsh[i])
        queryDBnaryFull("http://kaiko.getalp.org/sparql", langsh[i], langex[i])
| besacier/WCE-LIG | tools/dbnary/extract_dbnary_data.py | Python | gpl-3.0 | 4,673 |
"""
Python Image Processor
"""
from kaa import imlib2
from sys import argv
import glob
import pprint
import time
try:
import json
except ImportError:
import simplejson as json
#Begin Functions#
def fetch_sort(folder, type):
    """Return (sorted list of '<folder>/*.<type>' files, number of files)."""
    frame_paths = sorted(glob.glob('%s/*.%s' % (str(folder), str(type))))
    return frame_paths, len(frame_paths)
def overlay(background, watermark, x, y, dest):
    """Blend *watermark* onto *background* at position (x, y), saving the
    composited image to *dest*."""
    base_image = imlib2.open(background)
    mark_image = imlib2.open(watermark)
    base_image.blend(mark_image, src_pos=(x, y))
    base_image.save(dest)
def calculate_tween_vals(current_x, current_y, future_x, future_y, duration):
    """Return *duration* (x, y) positions moving linearly from the current
    point to the future point, one per frame.

    The first entry is the start point and the last entry is exactly the
    end point; intermediate points are produced by repeated addition of the
    per-frame step (preserving the original accumulation order).
    """
    step_x = (future_x - current_x) / (duration - 1)
    step_y = (future_y - current_y) / (duration - 1)
    pos_x, pos_y = current_x, current_y
    frames = [(pos_x, pos_y)]
    for _ in range(1, duration - 1):
        pos_x = pos_x + step_x
        pos_y = pos_y + step_y
        frames.append((pos_x, pos_y))
    frames.append((future_x, future_y))
    return frames
def tween(bgim_folder, type, watermark, start_x, start_y, end_x, end_y):
    """Composite *watermark* onto every '<type>' frame in *bgim_folder*
    (in place), moving it linearly from (start_x, start_y) to
    (end_x, end_y) across the frame sequence."""
    frames, frame_count = fetch_sort(bgim_folder, type)
    path = calculate_tween_vals(start_x, start_y, end_x, end_y, frame_count)
    for index, frame in enumerate(frames):
        overlay(frame, watermark, path[index][0], path[index][1], frame)
#End Functions#
# ---- Command-line driver (Python 2 syntax) ----
print 'Python Image Processor'
# NOTE(review): this check only warns and then falls through; also the
# 'tween' branch below reads argv[8], which requires len(argv) == 9 —
# confirm the intended argument count per sub-command.
if len(argv) != 8:
    print 'Error: invalid number of arguments'
if str(argv[1]) == 'tween':
    # tween <frames_dir> <ext> <watermark> <start_x> <start_y> <end_x> <end_y>
    tween(argv[2], argv[3], argv[4], int(argv[5]), int(argv[6]), int(argv[7]), int(argv[8]))
elif str(argv[1]) == 'overlay':
    # overlay <background> <watermark> <x> <y> <dest> (x/y passed as strings)
    overlay(argv[2], argv[3], argv[4], argv[5], argv[6])
else:
    print '(!)Function not recognized'
| cgranados/PHPMGen | src/python/Sequencer.py | Python | mit | 2,011 |
# Green drop-in replacement for the stdlib ``time`` module: re-export every
# attribute of the real module, then shadow ``sleep`` with eventlet's
# cooperative (greenthread-yielding) version.  Python 2 ``exec`` statement.
__time = __import__('time')
exec "\n".join("%s = __time.%s" % (var, var) for var in dir(__time))
# Names monkey-patched relative to the real module.
__patched__ = ['sleep']
from eventlet.greenthread import sleep
sleep # silence pyflakes
| simplegeo/eventlet | eventlet/green/time.py | Python | mit | 185 |
import datetime
import hashlib
import json
from django.db import models
class MonolithRecord(models.Model):
    """Data stored temporarily for monolith.

    It contains a key (e.g. "app.install"), the date of the record, a user (a
    string representing a unique user) and a value (which internally is stored
    as a JSON object).
    """
    # Metric identifier, e.g. "app.install".
    key = models.CharField(max_length=255)
    # When the event happened (UTC — see record_stat); indexed for range scans.
    recorded = models.DateTimeField(db_index=True)
    # Anonymous user fingerprint produced by get_user_hash().
    user_hash = models.CharField(max_length=255, blank=True)
    # JSON-encoded payload (serialized in record_stat()).
    value = models.TextField()

    class Meta:
        db_table = 'monolith_record'
def get_user_hash(request):
    """Get a hash identifying an user.

    It's a SHA-1 hash of the client IP, User-Agent and session key.
    """
    ip = request.META.get('REMOTE_ADDR', '')
    # NOTE(review): WSGI/Django expose the user agent under
    # 'HTTP_USER_AGENT' in request.META; the 'User-Agent' key here likely
    # always misses and yields '' — confirm before relying on uniqueness.
    ua = request.META.get('User-Agent', '')
    session_key = request.session.session_key or ''
    # NOTE(review): hashing a str works on Python 2 only; Python 3 requires
    # bytes (encode before hashing) — confirm target interpreter.
    return hashlib.sha1('-'.join(map(str, (ip, ua, session_key)))).hexdigest()
def record_stat(key, request, **data):
    """Create a new record in the database with the given values.

    :param key:
        The type of stats you're sending, e.g. "app.install".
    :param request:
        The request associated with this call. It will be used to define who
        the user is.
    :param data:
        The data you want to store, passed as named arguments.  A special
        ``__recorded`` key may be supplied to back-date the record; it is
        removed from the stored payload.

    :returns: the saved MonolithRecord.
    :raises ValueError: when no data values are provided.
    """
    if '__recorded' in data:
        recorded = data.pop('__recorded')
    else:
        recorded = datetime.datetime.utcnow()

    if not data:
        raise ValueError('You should at least define one value')

    record = MonolithRecord(key=key, user_hash=get_user_hash(request),
                            recorded=recorded, value=json.dumps(data))
    record.save()
    return record
| ingenioustechie/zamboni | mkt/monolith/models.py | Python | bsd-3-clause | 1,757 |
###############################################################################
# The MIT License (MIT)
# Copyright (c) 2007-2016 Roman Rodyakin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
from buildsupport.compilers import Compiler
from buildsupport.PlatformDetection import getTargetOsName
class Gcc(Compiler):
"""
A GCC-specific compiler instance. See Compilation.py and Linking.py for
more.
"""
def __init__(self, executable, version):
Compiler.__init__(self, "gcc", executable, version)
self._versionList = self._version.split('.')
assert len(version) > 1
try:
# If this is not flexible enough (i.e. versions like 3.2.3a are
# possible), change this to individual assignments. We currently
# depend on self._version[0] being a number
self._versionList = list(map(int, self._versionList))
except:
assert False
    def getCompilerArgv(self,
                        sourceFilePath,
                        outputFilePath,
                        defines,
                        headerDirs,
                        targetType,
                        exportedSharedApis,
                        importedSharedApis,
                        cppStandard,
                        disableWarnings,
                        disableThreading,
                        optimize):
        """
        Returns a list of strings that is a valid argv for the compiler
        invocation in order to compile a C++ module.

        The sourceFilePath specifies the location of the C++ source file.

        The outputFilePath parameter specifies the location of the resultant
        object file.

        The headerDirs parameter is a list of directories that the compiler will
        need to search for all the header files used by the module.

        The defines parameter is a mapping of preprocessor symbols into their
        values (None for value-less defines). The resultant list will contain
        the compiler flags that define those symbols with the relevant values.

        The targetType parameter is one of: "sharedlib", "executable",
        "staticlib". It is needed as the compiler might require additional
        flags depending on what type of target the resulting object file will
        end up in.

        The exportedSharedApis and importedSharedApis parameters are lists
        denoting the APIs that are exported or imported by this module. See
        Compilation.py for more on this.

        The disableWarnings parameter disables compilation warnings. This is
        currently used on external libraries only.

        The disableThreading parameter disables preprocessor flags necessary
        for threading support.

        The optimize parameter is set to True if the caller would like the
        compiler to optimize the generated code.
        """
        assert sourceFilePath and isinstance(sourceFilePath, str)
        assert outputFilePath and isinstance(outputFilePath, str)
        assert targetType in ["sharedlib", "staticlib", "executable"]
        result = [self._executable, "-c"]
        # Position-independent code is only needed for shared libraries.
        if targetType == "sharedlib":
            if getTargetOsName() == "darwin":
                result.append("-dynamic")
            elif getTargetOsName() != "cygwin":
                # See the GCC manpage for more on this. It essentially says
                # that on m88k, Sparc, m68k and on RS/6000 you might want to try
                # -fpic first instead. This is not currently a practical issue,
                # fix when it becomes one.
                result.append("-fPIC")
        result.append("-std=%s" % cppStandard)
        if not disableThreading:
            result.append("-pthread")
        result.append("-fstack-protector-strong")
        result.append("-fdiagnostics-color")
        if disableWarnings:
            result.append("-w")
        else:
            result.append("-Wall")
            result.append("-Wno-sign-compare")
        if optimize:
            result.append("-O3")
            result.append("-flto")
        else:
            # Debug info only for non-optimized builds.
            result.append("-g")
        result += ["-I%s" % dir for dir in headerDirs]
        for symbol in defines:
            if defines[symbol] is None:
                result.append("-D%s" % symbol)
            else:
                result.append("-D%s=%s" % (symbol, defines[symbol]))
        # NOTE(review): the visibility machinery below is assumed to be
        # grouped under the GCC >= 4 check (visibility attributes landed in
        # GCC 4) — the original indentation was ambiguous; confirm.
        if self._versionList[0] >= 4:
            result.append("-fvisibility=hidden")
            sharedLibExport = '-D%s_API=__attribute__((visibility("default")))'
            sharedLibImport = '-D%s_API=__attribute__((visibility("hidden")))'
            # NOTE(review): both typeinfo macros expand to "default"
            # visibility — presumably deliberate so RTTI/exceptions work
            # across shared-library boundaries; confirm.
            sharedLibTypeInfoExport = '-D%s_TYPEINFO_API=__attribute__((visibility("default")))'
            sharedLibTypeInfoImport = '-D%s_TYPEINFO_API=__attribute__((visibility("default")))'
            if exportedSharedApis:
                for api in exportedSharedApis:
                    result.append(sharedLibExport % api.upper())
                    result.append(sharedLibTypeInfoExport % api.upper())
            if importedSharedApis:
                for api in importedSharedApis:
                    result.append(sharedLibImport % api.upper())
                    result.append(sharedLibTypeInfoImport % api.upper())
        result.append("-o")
        result.append(outputFilePath)
        result.append(sourceFilePath)
        return result
def getSharedLinkerArgv(self,
sources,
output,
major,
minor,
soname,
libDirs,
dependencies,
optimize,
disableThreading):
"""
Returns a list of strings that is a valid argv for the compiler
invocation in order to link a set of object modules into a shared
library.
The sources parameter is the list of strings, each string denoting a
module to be linked into the shared library.
The output parameter is a string denoting the location of the resulting
shared library.
The major and minor parameters are the "major" and "minor" versions of
the resulting shared library. Their exact meaning varies based on
platform. See PlatformSettings.py for more.
The soname parameter is used for setting the DT_SONAME field in the
resulting ELF object. It will be ignored (and should be None) on
non-ELF platforms. For the meaning of DT_SONAME, check the GNU loader's
-soname=name option. Practically, on ELF systems this should be the
output file name without the minor version number.
The libDirs parameter is a list of directories that the shared linker will
need to search for all the header files used by the module.
The dependencies parameter is the list of libraries to link with.
The optimize parameter is this build's optimization setting (true or false).
The disableThreading parameter disables linker flags related to
threading support in the shared library.
"""
result = [self._executable]
if getTargetOsName() == "darwin":
result += ["-dynamiclib"]
result += ["-current_version", "%s.%s" % (major, minor)]
result += ["-compatibility_version", "%s.0" % major]
else:
result += ["-shared"]
result += ["-Wl,-soname,%s" % soname]
if optimize:
result.append("-flto")
result.append("-O3")
if not disableThreading:
result.append("-pthread")
for libdir in libDirs:
result.append("-L%s" % libdir)
result += sources
for dependency in dependencies:
result.append("-l%s" % dependency)
result.append("-o")
result.append(output)
return result
def getExecutableLinkerArgv(self,
sources,
output,
libDirs,
dependencies,
optimize,
disableThreading):
"""
Returns a list of strings that is a valid argv for the compiler
invocation in order to link a set of object modules into an executable.
The sources parameter is the list of strings, each string denoting a
module to be linked into the shared library.
The output parameter is a string denoting the location of the resulting
shared library.
The libDirs parameter is a list of directories that the shared linker will
need to search for all the header files used by the module.
The dependencies parameter is the list of libraries to link with.
The optimize parameter is this build's optimization setting (true or false).
The disableThreading parameter disables linker flags related to
threading support in the executable.
"""
result = [self._executable]
if optimize:
result.append("-flto")
result.append("-O3")
if not disableThreading:
result.append("-pthread")
for libdir in libDirs:
result.append("-L%s" % libdir)
result += sources
for dependency in dependencies:
result.append("-l%s" % dependency)
result.append("-o")
result.append(output)
return result
| rodyakin/chilly-build | share/chilly-build/buildsupport/compilers/gcc.py | Python | mit | 10,751 |
import time
import gevent
import collections
from datetime import datetime
from sqlalchemy import asc, desc, bindparam
from inbox.api.kellogs import APIEncoder, encode
from inbox.models import Transaction, Message, Thread, Account, Namespace
from inbox.models.session import session_scope
from inbox.models.util import transaction_objects
from inbox.sqlalchemy_ext.util import bakery
# Maps raw transaction-log commands to the event names exposed through the
# delta-sync API.
EVENT_NAME_FOR_COMMAND = {
    'insert': 'create',
    'update': 'modify',
    'delete': 'delete'
}
def get_transaction_cursor_near_timestamp(namespace_id, timestamp, db_session):
    """
    Exchange a timestamp for a 'cursor' into the transaction log.

    The cursor is the public_id of the transaction nearest to (at or
    before) `timestamp`, or '0' when no such transaction exists.

    Arguments
    ---------
    namespace_id: int
        Id of the namespace for which to get a cursor.
    timestamp: int
        Unix timestamp
    db_session: new_session
        database session

    Returns
    -------
    string
        A transaction public_id that can be passed as a 'cursor' parameter
        by API clients ('0' means "start of the log").
    """
    cutoff = datetime.utcfromtimestamp(timestamp)
    # Guarantee: when several transactions share the newest created_at
    # second before the cutoff, return the one with the highest id.
    # Filtering by created_at while ordering by id would force MySQL into
    # an expensive filesort, so instead: first find the newest created_at
    # strictly before the cutoff, then pick the highest-id transaction with
    # exactly that created_at.
    newest_created_at = db_session.query(Transaction.created_at). \
        order_by(desc(Transaction.created_at)). \
        filter(Transaction.created_at < cutoff,
               Transaction.namespace_id == namespace_id).limit(1).subquery()
    nearest_transaction = db_session.query(Transaction). \
        filter(Transaction.created_at == newest_created_at,
               Transaction.namespace_id == namespace_id). \
        order_by(desc(Transaction.id)).first()
    if nearest_transaction is None:
        # No earlier deltas: '0' is the special stamp meaning 'process from
        # the start of the log'.
        return '0'
    return nearest_transaction.public_id
def _get_last_trx_id_for_namespace(namespace_id, db_session):
    """Return the id of the newest transaction for the given namespace.

    Uses a baked (cached-compilation) SQLAlchemy query since this runs on
    every delta-sync poll. Raises if the namespace has no transactions
    (``.one()``).
    """
    q = bakery(lambda session: session.query(Transaction.id))
    q += lambda q: q.filter(
        Transaction.namespace_id == bindparam('namespace_id'))
    # Order by created_at first, then id, matching the ordering used when
    # paging through the log.
    q += lambda q: q.order_by(desc(Transaction.created_at)).\
        order_by(desc(Transaction.id)).limit(1)
    return q(db_session).params(namespace_id=namespace_id).one()[0]
def format_transactions_after_pointer(namespace, pointer, db_session,
                                      result_limit, exclude_types=None,
                                      include_types=None, exclude_folders=True,
                                      exclude_metadata=True, exclude_account=True,
                                      expand=False, is_n1=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }
    and new_pointer is the integer id of the last included transaction
    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session: new_session
        database session
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.
    include_types: list, optional
        If given, only include transactions for these types of objects.
    exclude_folders, exclude_metadata, exclude_account: bool, optional
        Backwards-compatibility switches that suppress newer object types
        (see the shim comments below).
    expand: bool, optional
        Whether to use expanded API representations of objects.
    is_n1: bool, optional
        Whether to encode objects for the N1 client.
    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    if exclude_account is True:
        exclude_types.add('account')
    # End backwards-compatibility shim.
    # Metadata is excluded by default, and can only be included by setting the
    # exclude_metadata flag to False. If listed in include_types, remove it.
    if exclude_metadata is True:
        exclude_types.add('metadata')
    if include_types is not None and 'metadata' in include_types:
        include_types.remove('metadata')
    # Cheap early-out: if the caller is already at the end of the log,
    # skip the paging loop entirely.
    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)
    while True:
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id)
        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))
        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))
        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()
        if not transactions:
            return ([], pointer)
        results = []
        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)
        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx for trx in
                           sorted(trxs, key=lambda t: t.id)}.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [trx.record_id for trx in latest_trxs
                            if trx.command != 'delete']
            object_cls = transaction_objects()[obj_type]
            if object_cls == Account:
                # The base query for Account queries the /Namespace/ table
                # since the API-returned "`account`" is a `namespace`
                # under-the-hood.
                query = db_session.query(Namespace).join(Account).filter(
                    Account.id.in_(ids_to_query),
                    Namespace.id == namespace.id)
                # Key by /namespace.account_id/ --
                # namespace.id may not be equal to account.id
                # and trx.record_id == account.id for `account` trxs.
                objects = {obj.account_id: obj for obj in query}
            else:
                query = db_session.query(object_cls).filter(
                    object_cls.id.in_(ids_to_query),
                    object_cls.namespace_id == namespace.id)
                if object_cls == Thread:
                    query = query.options(*Thread.api_loading_options(expand))
                if object_cls == Message:
                    query = query.options(*Message.api_loading_options(expand))
                    # T7045: Workaround for some SQLAlchemy bugs.
                    objects = {obj.id: obj for obj in query if obj.thread is not None}
                else:
                    objects = {obj.id: obj for obj in query}
            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    # Object was deleted after this transaction; skip the
                    # stale create/modify delta.
                    if obj is None:
                        continue
                    repr_ = encode(
                        obj, namespace_public_id=namespace.public_id,
                        expand=expand, is_n1=is_n1)
                    delta['attributes'] = repr_
                results.append((trx.id, delta))
        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
def streaming_change_generator(namespace, poll_interval, timeout,
                               transaction_pointer, exclude_types=None,
                               include_types=None, exclude_folders=True,
                               exclude_metadata=True, exclude_account=True,
                               expand=False, is_n1=False):
    """
    Poll the transaction log for the given namespace until `timeout`
    expires, and yield each time new entries are detected.
    Each yielded value is a JSON-encoded delta followed by a newline; a
    bare newline is yielded as a keepalive when no new entries are found.
    Arguments
    ---------
    namespace: Namespace
        Namespace for which to check changes.
    poll_interval: float
        How often (in seconds) to check for changes.
    timeout: float
        How many seconds to allow the connection to remain open.
    transaction_pointer: int, optional
        Yield transaction rows starting after the transaction with id equal to
        `transaction_pointer`.
    """
    encoder = APIEncoder(is_n1=is_n1)
    start_time = time.time()
    while time.time() - start_time < timeout:
        # The session is scoped to the query only, so it is not held open
        # while the client consumes the yielded data.
        with session_scope(namespace.id) as db_session:
            deltas, new_pointer = format_transactions_after_pointer(
                namespace, transaction_pointer, db_session, 100,
                exclude_types, include_types, exclude_folders,
                exclude_metadata, exclude_account, expand=expand, is_n1=is_n1)
        if new_pointer is not None and new_pointer != transaction_pointer:
            transaction_pointer = new_pointer
            for delta in deltas:
                yield encoder.cereal(delta) + '\n'
        else:
            # Nothing new: send a keepalive and wait before polling again.
            yield '\n'
            gevent.sleep(poll_interval)
| nylas/sync-engine | inbox/transactions/delta_sync.py | Python | agpl-3.0 | 10,974 |
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import sfml as sf
def main():
    """Open a window and draw a color-interpolated quad until it is closed."""
    window = sf.RenderWindow(sf.VideoMode(640, 480), 'SFML vertices example')
    window.framerate_limit = 60
    # One vertex per corner; the renderer interpolates the colors between
    # them when the quad is drawn.
    quad = [sf.Vertex((200, 150), sf.Color.RED),
            sf.Vertex((200, 350), sf.Color.BLUE),
            sf.Vertex((400, 350), sf.Color.GREEN),
            sf.Vertex((400, 150), sf.Color.YELLOW)]
    running = True
    while running:
        # Drain the event queue; a close request ends the loop after the
        # current frame is displayed.
        for event in window.iter_events():
            if event.type == sf.Event.CLOSED:
                running = False
        window.clear(sf.Color.WHITE)
        window.draw(quad, sf.TRIANGLES)
        window.display()
    window.close()
if __name__ == '__main__':
    main()
| bastienleonard/pysfml-cython | examples/vertices.py | Python | bsd-2-clause | 737 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
import sys
# Use the |git.bat| wrapper from depot_tools/ on Windows; plain |git|
# everywhere else.
GIT = 'git.bat' if sys.platform.startswith('win') else 'git'
def list_grds_in_repository(repo_path):
    """Returns the paths of all .grd files tracked by the git repo at
    repo_path."""
    # |git ls-files| performs its own glob expansion, so no shell is needed.
    # TODO(meacer): This should use list_grds_in_repository() from the internal
    # translate.py.
    command = [GIT, 'ls-files', '--', '*.grd']
    listing = subprocess.check_output(command, cwd=repo_path)
    return listing.strip().splitlines()
def git_add(files, repo_root):
    """Adds the given relative paths to the current CL."""
    # Stage in fixed-size batches so a single invocation never exceeds the
    # OS command line length limit.
    BATCH_SIZE = 50
    for start in range(0, len(files), BATCH_SIZE):
        batch = files[start:start + BATCH_SIZE]
        subprocess.check_call([GIT, 'add'] + batch, cwd=repo_root)
| endlessm/chromium-browser | tools/translation/helper/git_helper.py | Python | bsd-3-clause | 1,210 |
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
# Names exported by `from doctest import *`; grouped to mirror the table
# of contents further down in this file.
__all__ = [
    # 0. Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    # 1. Utility Functions
    'is_private',
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Tester
    'Tester',
    # 8. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 9. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry mapping option-flag names to their bit values.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
    """Register a new option flag under `name` and return its bit value.

    Each flag is assigned the next unused bit, so flags can be combined
    with bitwise OR.
    """
    bit = 1 << len(OPTIONFLAGS_BY_NAME)
    OPTIONFLAGS_BY_NAME[name] = bit
    return bit
# Comparison flags: control how an example's actual output is compared
# against its expected output.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
# Mask covering all comparison-related flags.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)
# Reporting flags: control how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
# Mask covering all reporting-related flags.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".
    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).
    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.
    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "") # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    # "Private" means a leading underscore on a name that is not a dunder
    # (double-underscore on both ends) name.
    has_leading_underscore = base.startswith("_")
    is_dunder = base.startswith("__") and base.endswith("__")
    return has_leading_underscore and not is_dunder
def _extract_future_flags(globs):
    """
    Return the compiler flags for the `__future__` features that have been
    imported into the given namespace (globs).
    """
    flags = 0
    for feature_name in __future__.all_feature_names:
        candidate = globs.get(feature_name, None)
        # Only count the binding if it really is the __future__ feature
        # object (not some unrelated name shadowing it).
        if candidate is getattr(__future__, feature_name):
            flags |= candidate.compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Return the module designated by `module`:
    - a module object is returned unchanged;
    - a string names a module, which is imported and returned;
    - None means "the calling module", located via the stack frame at
      the given depth in the call stack.
    Raises TypeError for anything else.
    """
    if inspect.ismodule(module):
        return module
    if isinstance(module, (str, unicode)):
        return __import__(module, globals(), locals(), ["*"])
    if module is None:
        caller_globals = sys._getframe(depth).f_globals
        return sys.modules[caller_globals['__name__']]
    raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
    """
    Prefix every non-blank line of `s` with `indent` space characters and
    return the result.
    """
    # '(?m)^(?!$)' matches the start of each line that is not empty.
    nonblank_line_start = '(?m)^(?!$)'
    return re.sub(nonblank_line_start, ' ' * indent, s)
def _exception_traceback(exc_info):
    """
    Format the given exc_info tuple (as returned by sys.exc_info()) as a
    traceback string and return it.
    """
    buf = StringIO()
    etype, evalue, tb = exc_info
    traceback.print_exception(etype, evalue, tb, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """Stand-in for sys.stdout while examples run; normalizes the captured
    output so it can be compared against an example's expected output."""
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline. There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        if hasattr(self, "softspace"):
            del self.softspace
        return result
    def truncate(self, size=None):
        StringIO.truncate(self, size)
        # Same softspace cleanup as getvalue(); see above.
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, where `want` may contain '...'
    markers, each matching any (possibly empty) substring of `got`.
    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got
    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2
    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False
    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False
    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece. If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`. That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)
    return True
def _comment_line(line):
    """Return a commented form of the given line."""
    stripped = line.rstrip()
    # A blank line becomes a bare '#' (no trailing space).
    return '# ' + stripped if stripped else '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user. Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives the debugger's own output (prompts, command
        # results) during user interaction.
        self.__out = out
        pdb.Pdb.__init__(self)
    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore the real stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    """
    Resolve `path` (a '/'-separated relative path) against the directory
    containing `module`, and return it as an OS-native path.

    Raises TypeError if `module` is not a module, and ValueError if `path`
    is absolute or the module's location cannot be determined.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')
    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if len(sys.argv) > 0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # A module w/o __file__ (this includes builtins). Bug fix: the
        # original concatenated the module OBJECT into the message string,
        # which raised TypeError instead of the intended ValueError; use
        # the module's name instead (as in later CPython versions).
        raise ValueError("Can't resolve paths relative to the module "
                         "%r (it has no __file__)" % module.__name__)
    # Combine the base directory and the path.
    return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a piece of source code together with the
    output it is expected to produce.  `Example` defines these attributes:
    - source: A single Python statement; always ends with a newline (the
      constructor appends one if missing).
    - want: The expected output from running the source code (either from
      stdout, or a traceback in case of exception).  Ends with a newline
      unless it's empty; the constructor appends one if missing.
    - exc_msg: The exception message the example is expected to generate,
      or `None` if no exception is expected.  It is compared against the
      return value of `traceback.format_exception_only()`.  Ends with a
      newline unless it's `None`; the constructor appends one if missing.
    - lineno: Zero-based line number, within the enclosing DocTest string,
      where this Example begins.
    - indent: The example's indentation in the DocTest string, i.e. the
      number of space characters that precede the example's first prompt.
    - options: A dictionary mapping option flags to True or False, used to
      override the DocTestRunner's default optionflags for this example.
      Flags absent from the dictionary keep their default value.  By
      default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Enforce the trailing-newline invariants documented above.
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        self.source = source
        self.want = want
        self.exc_msg = exc_msg
        self.lineno = lineno
        self.indent = indent
        # Default to a fresh empty dict rather than a shared one.
        self.options = {} if options is None else options
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace. Each `DocTest` defines the following attributes:
    - examples: the list of examples.
    - globs: The namespace (aka globals) that the examples should
      be run in.
    - name: A name identifying the DocTest (typically, the name of
      the object whose docstring this DocTest was extracted from).
    - filename: The name of the file that this DocTest was extracted
      from, or `None` if the filename is unknown.
    - lineno: The line number within filename where this DocTest
      begins, or `None` if the line number is unavailable. This
      line number is zero-based, with respect to the beginning of
      the file.
    - docstring: The string that the examples were extracted from,
      or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples. The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
            "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy so that running the examples cannot mutate the caller's
        # globals dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno
    def __repr__(self):
        # Summarize the example count rather than dumping the examples.
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))
    # This lets us sort tests by name:
    def __cmp__(self, other):
        # Python 2 rich-comparison hook; non-DocTest objects sort first.
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based. The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list -- unless the
            # "source" is nothing but blanks/comments, which isn't runnable.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                       lineno=lineno,
                                       indent=min_indent+len(m.group('indent')),
                                       options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where
        `source` is the matched example's source code (with prompts
        and indentation stripped); `options` is the dict of option-flag
        overrides found in the source; `want` is the example's expected
        output (with indentation stripped); and `exc_msg` is the
        expected exception message extracted from `want`, or None.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        # Continuation lines must begin with "..." at the same indent.
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the ">>> "/"... " prompt.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter
    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:
            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            # inspect.getfile raises TypeError for built-in objects.
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.  The final {}
        # is the `seen` dict used to break cycles (keyed by id()).
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests
    def _filter(self, obj, prefix, base):
        """
        Return true if the given object should not be examined.
        """
        # NOTE: `obj` itself is unused; filtering is purely name-based.
        return (self._namefilter is not None and
                self._namefilter(prefix, base))
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            # func_globals is the Python 2 spelling of __globals__.
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod.
                # (im_func is the Python 2 spelling of __func__.)
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                      self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the .py source, not the compiled bytecode file.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.
        # (im_func/func_code/etc. are the Python 2 spellings.)
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.
    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> for test in tests:
    ...     print runner.run(test)
    (0, 2)
    (0, 1)
    (0, 2)
    (0, 2)
    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    (0, 7)
    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:
    >>> runner.tries
    7
    >>> runner.failures
    0
    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.
    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries), aggregated across runs.
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////
    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")
    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))
    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    def _failure_header(self, test, example):
        # Build the 'File "...", line N' header shown before each failure.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)
    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then supress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                # (Python 2 `exec ... in` statement syntax.)
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception:  check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries
    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve example source for the fake "<doctest name[N]>" filenames
        # generated by __run's compile(), so the debugger can display it.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename)#?, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Always undo the three monkey-patches, even on error.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print " ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold the per-test (failures, tries) counters from `other`
        # into this runner, summing counts for tests seen in both.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised in debugging mode when a doctest example's actual output
    does not match its expected output.

    Attributes:
      - test: the DocTest object being run
      - example: the Example object that failed
      - got: the actual output
    """
    def __init__(self, test, example, got):
        # Keep everything the caller needs for post-mortem inspection.
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        # Identify the failure by its DocTest.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised in debugging mode when a doctest example raises an
    exception that was not expected.

    Attributes:
      - test: the DocTest object being run
      - example: the Example object that failed
      - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        # Keep everything the caller needs for post-mortem inspection.
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        # Identify the failure by its DocTest.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
      >>> runner = DebugRunner(verbose=False)
      >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
      ...                                    {}, 'foo', 'foo.py', 0)
      >>> try:
      ...     runner.run(test)
      ... except UnexpectedException, failure:
      ...     pass
      >>> failure.test is test
      True
      >>> failure.example.want
      '42\n'
      >>> exc_info = failure.exc_info
      >>> raise exc_info[0], exc_info[1], exc_info[2]
      Traceback (most recent call last):
      ...
      KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
      >>> test = DocTestParser().get_doctest('''
      ... >>> x = 1
      ... >>> x
      ... 2
      ... ''', {}, 'foo', 'foo.py', 0)
      >>> try:
      ...    runner.run(test)
      ... except DocTestFailure, failure:
      ...    pass
    DocTestFailure objects provide access to the test:
      >>> failure.test is test
      True
    As well as to the example:
      >>> failure.example.want
      '2\n'
    and the actual output:
      >>> failure.got
      '1\n'
    If a failure or error occurs, the globals are left intact:
      >>> del test.globs['__builtins__']
      >>> test.globs
      {'x': 1}
      >>> test = DocTestParser().get_doctest('''
      ... >>> x = 2
      ... >>> raise KeyError
      ... ''', {}, 'foo', 'foo.py', 0)
      >>> runner.run(test)
      Traceback (most recent call last):
      ...
      UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
      >>> del test.globs['__builtins__']
      >>> test.globs
      {'x': 2}
    But the globals are cleared if there is no error:
      >>> test = DocTestParser().get_doctest('''
      ... >>> x = 2
      ... ''', {}, 'foo', 'foo.py', 0)
      >>> runner.run(test)
      (0, 1)
      >>> test.globs
      {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always run the base class with clear_globs=False so that a
        # failure leaves test.globs intact for post-mortem debugging;
        # clear them here only when the run finished without raising.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort on the first unexpected exception, wrapping it so the
        # caller can reach the test and example that triggered it.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Abort on the first output mismatch.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
# Results from successive testmod()/testfile() calls are folded into
# this runner via DocTestRunner.merge().
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, isprivate=None,
    report=True, optionflags=0, extraglobs=None, raise_on_error=False,
    exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.  Unless isprivate is specified, private names
    are not skipped.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See doctest.__doc__ for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Deprecated in Python 2.4:
    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private.  The default function is
    treat all functions as public.  Optionally, "isprivate" can be
    set to doctest.is_private to skip over functions marked as private
    using the underscore naming convention; see its docs for details.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)

    # Default to __main__ when no module was given.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    if name is None:
        name = m.__name__

    # Collect all the tests, then run them with a single runner.
    test_finder = DocTestFinder(_namefilter=isprivate,
                                exclude_empty=exclude_empty)
    runner_class = DocTestRunner
    if raise_on_error:
        runner_class = DebugRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)
    for test in test_finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)
    if report:
        runner.summarize()

    # Fold the results into the module-level master runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Turn a module-relative path into a real filesystem path.
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)

    # Default the test name to the file's basename.
    if name is None:
        name = os.path.basename(filename)

    # Build the globals dict for the test without mutating the caller's.
    if globs is None:
        globs = {}
    else:
        globs = dict(globs)
    if extraglobs is not None:
        globs.update(extraglobs)

    runner_class = DocTestRunner
    if raise_on_error:
        runner_class = DebugRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    # Parse the file's content into a single DocTest and run it.
    text = open(filename).read()
    runner.run(parser.get_doctest(text, globs, name, filename, 0))
    if report:
        runner.summarize()

    # Fold the results into the module-level master runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.

    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.

    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Extract the tests from f's docstring only (recurse=False), then
    # run each one with a single shared runner.
    test_finder = DocTestFinder(verbose=verbose, recurse=False)
    test_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in test_finder.find(f, name, globs=globs):
        test_runner.run(test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    """Deprecated pre-2.4 doctest driver, retained only for backwards
    compatibility.  Use DocTestFinder/DocTestRunner instead.
    """
    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):
        # Warn every instantiation: this class is kept only so old
        # callers keep working.
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        # Default the execution globals to the module's namespace.
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        # One finder/runner pair shared by all run* methods.
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)
    def runstring(self, s, name):
        # Parse the string as a doctest and run it; returns (#failures,
        # #tries).
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)
    def rundoc(self, object, name=None, module=None):
        # Run all doctests reachable from `object`, accumulating the
        # (#failures, #tries) totals across them.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)
    def rundict(self, d, name, module=None):
        # Wrap the dict in a synthetic module so rundoc can walk it.
        import new
        m = new.module(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)
    def run__test__(self, d, name):
        # Run a __test__-style mapping via a synthetic module.
        import new
        m = new.module(name)
        m.__test__ = d
        return self.rundoc(m, name)
    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)
    def merge(self, other):
        # Fold another Tester's results into this one's runner.
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Default reporting flags applied by DocTestCase.runTest when the
# caller's optionflags contain no reporting flags; see
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.

    The old flag is returned so that a runner could restore the old
    value if it wished to:

      >>> old = _unittest_reportflags
      >>> set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True

      >>> import doctest
      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True

    Only reporting flags can be set:

      >>> set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)

      >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    # Reject anything that is not a pure reporting flag.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
    # Adapter that runs a single DocTest as a unittest.TestCase.
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        """Wrap `test` (a DocTest) as a unittest test case.

        optionflags -- doctest option flags; if no reporting flags are
            included, the module-wide _unittest_reportflags are added
            at run time.
        setUp/tearDown -- optional callables invoked with the DocTest
            before/after the run.
        checker -- optional OutputChecker used to compare output.
        """
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
    def setUp(self):
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)
    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Drop the test's globals so no state leaks between cases.
        test.globs.clear()
    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            # Capture the runner's report in `new`; globs are kept so
            # tearDown (or debug) can still inspect them.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            # Re-raise the captured doctest report as a unittest failure.
            raise self.failureException(self.format_failure(new.getvalue()))
    def format_failure(self, err):
        # Render a unittest-style failure header followed by the
        # doctest runner's report (`err`).
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        # Unqualified (last-component) test name.
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )
    def debug(self):
        r"""Run the test case without results and without catching exceptions

        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging.  The test code
        is run in such a way that errors are not caught.  This way a
        caller can catch the errors and initiate post-mortem debugging.

        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexepcted
        exception:

          >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
          ...                                    {}, 'foo', 'foo.py', 0)
          >>> case = DocTestCase(test)
          >>> try:
          ...     case.debug()
          ... except UnexpectedException, failure:
          ...     pass

        The UnexpectedException contains the test, the example, and
        the original exception:

          >>> failure.test is test
          True
          >>> failure.example.want
          '42\n'
          >>> exc_info = failure.exc_info
          >>> raise exc_info[0], exc_info[1], exc_info[2]
          Traceback (most recent call last):
          ...
          KeyError

        If the output doesn't match, then a DocTestFailure is raised:

          >>> test = DocTestParser().get_doctest('''
          ... >>> x = 1
          ... >>> x
          ... 2
          ... ''', {}, 'foo', 'foo.py', 0)
          >>> case = DocTestCase(test)
          >>> try:
          ...    case.debug()
          ... except DocTestFailure, failure:
          ...    pass

        DocTestFailure objects provide access to the test:

          >>> failure.test is test
          True

        As well as to the example:

          >>> failure.example.want
          '2\n'

        and the actual output:

          >>> failure.got
          '1\n'
        """
        self.setUp()
        # DebugRunner raises on the first failure/unexpected exception
        # instead of reporting, so errors propagate to the caller.
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()
    def id(self):
        return self._dt_test.name
    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each documentation string in a module that
    contains doctest tests to a unittest test case.  If any of the
    tests in a doc string fail, then the test case fails.  An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.

    The `module` argument provides the module to be tested.  The argument
    can be either a module or a module name.

    If no argument is given, the calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function.  This is called before running the
      tests in each file.  The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
       A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if len(test.examples) == 0:
            continue
        if not test.filename:
            # Fall back to the module's source file, trimming a .pyc/.pyo
            # extension so failures point at the .py source.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))
    return suite
class DocFileCase(DocTestCase):
    """DocTestCase variant for file-based doctests: identified by a
    filesystem-flavored name and reported against line 0 of the file.
    """
    def id(self):
        # Replace the dotted test name with an underscore-joined one.
        return '_'.join(self._dt_test.name.split('.'))

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        test = self._dt_test
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (test.name, test.filename, err))
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """Create a DocFileCase for the doctests contained in one text file."""
    if globs is None:
        globs = {}

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Interpret module-relative paths with respect to the package (or
    # the calling module's directory).
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)

    # Parse the file's content into a doctest, then wrap it in a case.
    name = os.path.basename(path)
    text = open(path).read()
    return DocFileCase(parser.get_doctest(text, globs, name, path, 0),
                       **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".

    A number of options may be provided as keyword arguments:

    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").

      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).

    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.

    setUp
      A set-up function.  This is called before running the
      tests in each file.  The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.

    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    """
    # Resolve the package here so that _normalize_module sees the real
    # caller; resolving inside DocFileTest would make this function the
    # caller and could guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    suite = unittest.TestSuite()
    for path in paths:
        suite.addTest(DocFileTest(path, **kw))
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.

    Converts text with examples to a Python script.  Example input is
    converted to regular code.  Example output and all other words
    are converted to comments:

      >>> text = '''
      ...       Here are examples of simple math.
      ...
      ...           Python has super accurate integer addition
      ...
      ...           >>> 2 + 2
      ...           5
      ...
      ...           And very friendly error messages:
      ...
      ...           >>> 1/0
      ...           To Infinity
      ...           And
      ...           Beyond
      ...
      ...           You can use logic if you want:
      ...
      ...           >>> if 0:
      ...           ...    blah
      ...           ...    blah
      ...           ...
      ...
      ...           Ho hum
      ...           '''

      >>> print script_from_examples(text)
      # Here are examples of simple math.
      #
      #     Python has super accurate integer addition
      #
      2 + 2
      # Expected:
      ## 5
      #
      #     And very friendly error messages:
      #
      1/0
      # Expected:
      ## To Infinity
      ## And
      ## Beyond
      #
      #     You can use logic if you want:
      #
      if 0:
         blah
         blah
      #
      #     Ho hum
    """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source becomes real code (minus its trailing NL).
            output.append(piece.source[:-1])
            if piece.want:
                # Expected output becomes doubly-commented lines.
                output.append('# Expected:')
                output.extend(['## ' + line
                               for line in piece.want.split('\n')[:-1]])
        else:
            # Plain prose becomes ordinary comment lines.
            output.extend([_comment_line(line)
                           for line in piece.split('\n')[:-1]])

    # Trim empty-comment junk from both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    return '\n'.join(output)
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    # Use the first (and normally only) matching test.
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the string `src`."""
    # Convert the docstring into a runnable script, then debug that.
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script.  `src` is the script, as a string."
    import pdb
    # If `pm` is true, run the script to completion and post-mortem the
    # exception it raises; otherwise single-step it under pdb.run().
    # Note that tempfile.NameTemporaryFile() cannot be used.  As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    # NOTE(review): mktemp() is race-prone/insecure; acceptable only for
    # interactive debugging use.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()
    try:
        # Run with a private copy of the globals so the script cannot
        # mutate the caller's namespace.
        if globs:
            globs = globs.copy()
        else:
            globs = {}
        if pm:
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here.  '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        # Always remove the temporary script file.
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    module = _normalize_module(module)
    # Extract the named test as a script and debug it in the module's
    # namespace.
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Run this module's own doctests through the unittest machinery."""
    runner = unittest.TextTestRunner()
    runner.run(DocTestSuite())

if __name__ == "__main__":
    _test()
| grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/Paste-1.7.5.1-py2.7.egg/paste/util/doctest24.py | Python | apache-2.0 | 99,418 |
#!/usr/bin/python
#------------------------------------------------------------------------------
#
# Copyright (C) 2016 Cisco Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from iosxr_common import *
from iosxr import *
DOCUMENTATION = """
---
module: iosxr_clear_log
author: Adisorn Ermongkonchai
short_description: Clear system log
description:
- Clear system log
options:
host:
description:
- IP address or hostname (resolvable by Ansible control host) of
the target IOS-XR node.
required: true
username:
description:
- username used to login to IOS-XR
required: false
default: none
password:
description:
- password used to login to IOS-XR
required: false
default: none
"""
EXAMPLES = """
- iosxr_clear_log:
host: '{{ ansible_ssh_host }}'
username: cisco
password: cisco
"""
RETURN = """
stdout:
description: raw response
returned: always
stdout_lines:
description: list of response lines
returned: always
"""
# Accept extra prompt endings (e.g. the confirmation prompt emitted by
# "clear logging") so the shell layer does not hang waiting for a
# standard prompt.
CLI_PROMPTS_RE.append(re.compile(r'[\r\n]?[>|#|%|:](?:\s*)$'))
def main():
    """Ansible entry point: clear the IOS-XR system log and return output."""
    module = get_module(
        argument_spec = dict(
            username = dict(required=False, default=None),
            password = dict(required=False, default=None),
        ),
        supports_check_mode = False
    )
    # "clear logging" asks for confirmation on the device; queue 'y' as
    # the follow-up answer.
    commands = ['clear logging']
    commands.append('y')
    response = execute_command(module, commands)
    # Clearing the log always changes device state.
    result = dict(changed=True)
    result['stdout'] = response
    # NOTE(review): this splits on the literal two characters '\' + 'n'
    # (raw string), not on a real newline.  That matches the escaped
    # newlines in str(response)'s repr form, but confirm it is intended
    # rather than '\n'.
    result['stdout_lines'] = str(result['stdout']).split(r'\n')
    return module.exit_json(**result)
if __name__ == "__main__":
    main()
| ios-xr/iosxr-ansible | local/library/iosxr_clear_log.py | Python | gpl-3.0 | 2,465 |
# -*- coding: utf-8 -*-
"""
This file contains the dummy for a magnet interface.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from collections import OrderedDict
from core.base import Base
from interface.magnet_interface import MagnetInterface
class MagnetAxisDummy:
    """ Generic dummy magnet representing one axis.

    Holds the mutable state (label, position, velocity, status) that
    MagnetDummy reads and writes for each of its axes.
    """
    def __init__(self, label):
        self.label = label   # axis identifier, e.g. 'x' or 'phi'
        self.pos = 0.0       # current position in the axis' unit
        # Fix: initialize the velocity. MagnetDummy.get_velocity reads this
        # attribute, which previously only existed after set_velocity was
        # called, causing an AttributeError on a fresh axis.
        self.vel = 0.0
        # status tuple: (status code, {code: human-readable description})
        self.status = 0, {0: 'MagnetDummy Idle'}
class MagnetDummy(Base, MagnetInterface):
    """This is the Interface class to define the controls for the simple
    magnet hardware.

    The dummy simulates three linear axes (x, y, z) and one rotation axis
    (phi). All state lives in MagnetAxisDummy objects; no hardware is used.
    """
    _modtype = 'MagnetDummy'
    _modclass = 'hardware'

    _out = {'magnetstage': 'MagnetInterface'}

    def __init__(self, config, **kwargs):
        super().__init__(config=config, **kwargs)

        self.log.info('The following configuration was found.')

        # checking for the right configuration
        for key in config.keys():
            self.log.info('{0}: {1}'.format(key, config[key]))

        # these labels should actually be set by the config.
        self._x_axis = MagnetAxisDummy('x')
        self._y_axis = MagnetAxisDummy('y')
        self._z_axis = MagnetAxisDummy('z')
        self._phi_axis = MagnetAxisDummy('phi')

        # Fix: initialize the idle flag so get_magnet_idle_state() cannot
        # raise AttributeError before set_magnet_idle_state() was called.
        self._idle_state = False

        #TODO: Checks if configuration is set and is reasonable

    def _axes(self):
        """ Internal helper: all axis objects in (x, y, z, phi) order. """
        return (self._x_axis, self._y_axis, self._z_axis, self._phi_axis)

    def on_activate(self, e):
        """ Activation of the module.

        @param object e: Fysom.event object from Fysom class, connected to a
                         specific state-machine event (see the Base class).
        """
        pass

    def on_deactivate(self, e):
        """ Deactivate the module properly.

        @param object e: Fysom.event object from Fysom class. A more detailed
                         explanation can be found in the method on_activate.
        """
        pass

    def get_constraints(self):
        """ Retrieve the hardware constraints from the magnet device.

        @return dict: {'<axis_label>': <constraint dict>} where each
                      constraint dict carries the keys 'label', 'unit',
                      'ramp', 'pos_min', 'pos_max', 'pos_step', 'vel_min',
                      'vel_max', 'vel_step', 'acc_min', 'acc_max',
                      'acc_step'. Unsupported values are None.
        """
        constraints = OrderedDict()

        # x, y and z are identical 100 mm linear axes.
        for axis in (self._x_axis, self._y_axis, self._z_axis):
            constraints[axis.label] = {
                'label': axis.label,
                'unit': 'm',                    # the SI units
                'ramp': ['Sinus', 'Linear'],    # a possible list of ramps
                'pos_min': 0,
                'pos_max': 100e-3,              # the traveling range
                'pos_step': 0.001e-3,
                'vel_min': 0,
                'vel_max': 100e-3,
                'vel_step': 0.01e-3,
                'acc_min': 0.1e-3,
                'acc_max': 0.0,
                'acc_step': 0.0}

        # phi is a full-circle rotation axis in degrees.
        constraints[self._phi_axis.label] = {
            'label': self._phi_axis.label,
            'unit': '°',
            'ramp': ['Sinus', 'Trapez'],
            'pos_min': 0,
            'pos_max': 360,
            'pos_step': 0.1,
            'vel_min': 1,
            'vel_max': 20,
            'vel_step': 0.1,
            'acc_min': None,
            'acc_max': None,
            'acc_step': None}

        return constraints

    def move_rel(self, param_dict):
        """ Moves magnet in given direction (relative movement)

        @param dict param_dict: {'<axis_label>': <relative move>} for any
                                subset of the axes from get_constraints().
                                Moves that would leave the allowed position
                                range are refused with a warning.

        A smart idea would be to ask the position after the movement.
        """
        curr_pos_dict = self.get_pos()
        constraints = self.get_constraints()

        for axis in self._axes():
            move = param_dict.get(axis.label)
            if move is None:
                continue
            constr = constraints[axis.label]
            target = curr_pos_dict[axis.label] + move
            if target > constr['pos_max'] or target < constr['pos_min']:
                self.log.warning('Cannot make further movement of the axis '
                        '"{0}" with the step {1}, since the border [{2},{3}] '
                        ' of the magnet was reached! Ignore '
                        'command!'.format(
                            axis.label,
                            move,
                            constr['pos_min'],
                            constr['pos_max']))
            else:
                axis.pos = axis.pos + move

    def move_abs(self, param_dict):
        """ Moves magnet to absolute position (absolute movement)

        @param dict param_dict: {'<axis_label>': <target position>}. Targets
                                outside the allowed range are refused with a
                                warning.

        A smart idea would be to ask the position after the movement.
        """
        constraints = self.get_constraints()

        for axis in self._axes():
            desired_pos = param_dict.get(axis.label)
            if desired_pos is None:
                continue
            constr = constraints[axis.label]
            if not(constr['pos_min'] <= desired_pos <= constr['pos_max']):
                self.log.warning('Cannot make absolute movement of the axis '
                        '"{0}" to possition {1}, since it exceeds the limits '
                        '[{2},{3}] of the magnet! Command is '
                        'ignored!'.format(
                            axis.label,
                            desired_pos,
                            constr['pos_min'],
                            constr['pos_max']))
            else:
                axis.pos = desired_pos

    def abort(self):
        """ Stops movement of the stage

        @return int: error code (0:OK, -1:error)
        """
        self.log.info('MagnetDummy: Movement stopped!')
        return 0

    def get_pos(self, param_list=None):
        """ Gets current position of the magnet stage arms

        @param list param_list: optional list of axis labels; if None, the
                                positions of all axes are returned.

        @return dict: {'<axis_label>': <current position>}
        """
        pos = {}
        for axis in self._axes():
            if param_list is None or axis.label in param_list:
                pos[axis.label] = axis.pos
        return pos

    def get_status(self, param_list=None):
        """ Get the status of the position

        @param list param_list: optional list of axis labels; if None, the
                                status of all axes is returned.

        @return dict: {'<axis_label>': <status tuple>}
        """
        status = {}
        for axis in self._axes():
            if param_list is None or axis.label in param_list:
                status[axis.label] = axis.status
        return status

    def calibrate(self, param_list=None):
        """ Calibrates the magnet stage.

        @param list param_list: optional list of axis labels; if None, all
                                connected axes are calibrated.

        @return int: error code (0:OK, -1:error)

        After calibration the stage moves to home position (0.0), which is
        the zero point for the passed axes.
        """
        for axis in self._axes():
            if param_list is None or axis.label in param_list:
                axis.pos = 0.0
        return 0

    def get_velocity(self, param_list=None):
        """ Gets the current velocity for all connected axes.

        @param dict param_list: optional list of axis labels; if None, the
                                velocities of all axes are returned.

        @return dict: {'<axis_label>': <velocity>}
        """
        vel = {}
        for axis in self._axes():
            if param_list is None or axis.label in param_list:
                # Fix: the original stored the y and z velocities under the
                # x axis label and read the non-existent attribute
                # 'get_vel'. Report 0.0 if no velocity was set yet.
                vel[axis.label] = getattr(axis, 'vel', 0.0)
        return vel

    def set_velocity(self, param_dict=None):
        """ Write new value for velocity.

        @param dict param_dict: {'<axis_label>': <velocity value>}. Values
                                outside the allowed range are refused with a
                                warning.
        """
        constraints = self.get_constraints()

        for axis in self._axes():
            desired_vel = param_dict.get(axis.label)
            if desired_vel is None:
                continue
            constr = constraints[axis.label]
            if not(constr['vel_min'] <= desired_vel <= constr['vel_max']):
                self.log.warning('Cannot make absolute movement of the axis '
                        '"{0}" to possition {1}, since it exceeds the limits '
                        '[{2},{3}] ! Command is ignored!'.format(
                            axis.label,
                            desired_vel,
                            constr['vel_min'],
                            constr['vel_max']))
            else:
                axis.vel = desired_vel

    def tell(self, param_dict=None):
        """ Send a command to the magnet.

        @param dict param_dict: {'<axis_label>': <command string>}

        @return int: error code (0:OK, -1:error)
        """
        self.log.info('You can tell the magnet dummy as much as you want, it '
                'has always an open ear for you. But do not expect an '
                'answer, it is very shy!')
        return 0

    def ask(self, param_dict=None):
        """ Ask the magnet a question.

        @param dict param_dict: {'<axis_label>': <question string>}

        @return dict: a placeholder answer for each passed axis label.
        """
        self.log.info('Dude, I am a dummy! Your question(s) "{0}" to the '
                'axis "{1}" is/are way to complicated for me :D ! If you '
                'want to talk to someone, ask Siri, maybe she will listen to '
                'you and answer your questions :P.'.format(
                    list(param_dict.values()), list(param_dict)))

        return_val = {}
        for entry in param_dict:
            return_val[entry] = 'Nothing to say, Motor is quite.'

        return return_val

    def set_magnet_idle_state(self, magnet_idle=True):
        """ Set the magnet to couple/decouple to/from the control.

        @param bool magnet_idle: if True the magnet is set to idle; if False
                                 it is coupled to the control.

        @return bool: the actual state which was set in the magnet hardware.
                      True = idle, decoupled from control
                      False = not idle, coupled to control
        """
        self._idle_state = magnet_idle
        return self._idle_state

    def get_magnet_idle_state(self):
        """ Retrieve the current state of the magnet, whether it is idle or not.

        @return bool: True = idle, decoupled from control
                      False = not idle, coupled to control
        """
        return self._idle_state

    def initialize(self):
        """
        Acts as a switch. When all coils of the superconducting magnet are
        heated it cools them, else the coils get heated.

        @raise NotImplementedError: always; the dummy has no coils.
        """
        # Fix: the original raised the undefined name
        # 'InterfaceImplementationError' (a NameError at call time) and the
        # trailing 'return -1' was unreachable.
        raise NotImplementedError('magnet_interface>initialize')
| drogenlied/qudi | hardware/magnet/magnet_dummy.py | Python | gpl-3.0 | 26,940 |
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import pytest
import operator
import odl
from odl.util.testutils import (all_equal, all_almost_equal, almost_equal,
noise_elements, simple_fixture)
exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 0.5, 1.5])
def test_emptyproduct():
    """A product of zero spaces needs an explicit field and has no parts."""
    with pytest.raises(ValueError):
        odl.ProductSpace()

    field = odl.RealNumbers()
    empty = odl.ProductSpace(field=field)
    assert empty.field == field
    assert empty.size == 0

    with pytest.raises(IndexError):
        empty[0]
def test_RxR():
    """Basic properties and element wrapping of rn(2) x rn(2)."""
    base = odl.rn(2)
    prod = odl.ProductSpace(base, base)

    # Basic properties of the product space
    assert len(prod) == 2
    assert prod.shape == (2,)
    assert prod.size == 2
    assert prod.dtype == base.dtype
    assert prod.spaces[0] is base
    assert prod.spaces[1] is base
    assert prod.is_power_space
    assert not prod.is_weighted

    # Elements can be built from parts or from nested sequences
    part0 = base.element([1, 2])
    part1 = base.element([3, 4])
    from_parts = prod.element([part0, part1])
    from_lists = prod.element([[1, 2], [3, 4]])
    assert all_equal([part0, part1], from_parts)
    assert all_equal([part0, part1], from_lists)
def test_equals_space(exponent):
    """Equality and hashing of product spaces of equal/unequal size."""
    base = odl.rn(2)
    power3_a = odl.ProductSpace(base, 3, exponent=exponent)
    power3_b = odl.ProductSpace(base, 3, exponent=exponent)
    power4 = odl.ProductSpace(base, 4, exponent=exponent)

    # identity
    assert power3_a is power3_a
    assert power3_a is not power3_b
    assert power3_a is not power4

    # equality follows structure, not identity
    assert power3_a == power3_a
    assert power3_a == power3_b
    assert power3_a != power4

    # hashing is consistent with equality
    assert hash(power3_a) == hash(power3_b)
    assert hash(power3_a) != hash(power4)
def test_equals_vec(exponent):
    """Equality of product-space elements follows values, not identity."""
    base = odl.rn(2)
    space3 = odl.ProductSpace(base, 3, exponent=exponent)
    space4 = odl.ProductSpace(base, 4, exponent=exponent)

    zero_a = space3.zero()
    zero_b = space3.zero()
    ones = space3.one()
    zero_other = space4.zero()

    # identity
    assert zero_a is zero_a
    assert zero_a is not zero_b
    assert zero_a is not ones

    # equality
    assert zero_a == zero_a
    assert zero_a == zero_b
    assert zero_a != ones
    assert zero_a != zero_other
def test_is_power_space():
    """Power-space construction equals repeated explicit construction."""
    base = odl.rn(2)
    power = odl.ProductSpace(base, 3)

    assert len(power) == 3
    assert power.is_power_space
    assert power.spaces[0] is base
    assert power.spaces[1] is base
    assert power.spaces[2] is base

    explicit = odl.ProductSpace(base, base, base)
    assert power == explicit
def test_mixed_space():
    """Verify that a productspace of mixed dtypes is handled properly."""
    space_f64 = odl.rn(2, dtype='float64')
    space_f32 = odl.rn(2, dtype='float32')
    mixed = odl.ProductSpace(space_f64, space_f32)

    assert not mixed.is_power_space
    assert mixed.spaces[0] is space_f64
    assert mixed.spaces[1] is space_f32

    # dtype is not well defined when the components disagree
    with pytest.raises(AttributeError):
        mixed.dtype
def test_element():
    """Element creation validates both the outer and inner lengths."""
    base = odl.rn(2)
    prod = odl.ProductSpace(base, base)
    prod.element([[1, 2], [3, 4]])

    # wrong number of parts
    for bad_input in ([[1, 2]],
                      [[1, 2], [3, 4], [5, 6]]):
        with pytest.raises(ValueError):
            prod.element(bad_input)

    # wrong length of a single part
    for bad_input in ([[1, 2, 3], [4, 5]],
                      [[1, 2], [3, 4, 5]]):
        with pytest.raises(ValueError):
            prod.element(bad_input)
def test_lincomb():
    """lincomb on a product space is applied component-wise."""
    base = odl.rn(2)
    prod = odl.ProductSpace(base, base)

    x0 = base.element([1, 2])
    x1 = base.element([5, 3])
    y0 = base.element([-1, 7])
    y1 = base.element([2, 1])
    x = prod.element([x0, x1])
    y = prod.element([y0, y1])
    out = prod.element()

    alpha, beta = 3.12, 1.23
    prod.lincomb(alpha, x, beta, y, out=out)

    assert all_almost_equal(out, [alpha * x0 + beta * y0,
                                  alpha * x1 + beta * y1])
def test_multiply():
    """multiply on a product space is applied component-wise."""
    base = odl.rn(2)
    prod = odl.ProductSpace(base, base)

    x0 = base.element([1, 2])
    x1 = base.element([5, 3])
    y0 = base.element([-1, 7])
    y1 = base.element([2, 1])
    x = prod.element([x0, x1])
    y = prod.element([y0, y1])
    out = prod.element()

    prod.multiply(x, y, out=out)
    assert all_almost_equal(out, [x0 * y0, x1 * y1])
def test_metric():
    """Product-space dist is the p-norm of the component distances."""
    base = odl.rn(2)
    a0 = base.element([1, 2])
    a1 = base.element([5, 3])
    b0 = base.element([1, 2])
    b1 = base.element([8, 9])

    dist0 = base.dist(a0, b0)
    dist1 = base.dist(a1, b1)

    for p in (1.0, 2.0, float('inf')):
        prod = odl.ProductSpace(base, base, exponent=p)
        w1 = prod.element([a0, a1])
        w2 = prod.element([b0, b1])
        if p == 1.0:
            expected = dist0 + dist1
        elif p == 2.0:
            expected = (dist0 ** 2 + dist1 ** 2) ** (1 / 2.0)
        else:
            expected = max(dist0, dist1)
        assert almost_equal(prod.dist(w1, w2), expected)
def test_norm():
    """Product-space norm is the p-norm of the component norms."""
    base = odl.rn(2)
    part0 = base.element([1, 2])
    part1 = base.element([5, 3])
    norm0 = base.norm(part0)
    norm1 = base.norm(part1)

    cases = [(1.0, norm0 + norm1),
             (2.0, (norm0 ** 2 + norm1 ** 2) ** (1 / 2.0)),
             (float('inf'), max(norm0, norm1))]

    for p, expected in cases:
        prod = odl.ProductSpace(base, base, exponent=p)
        w = prod.element([part0, part1])
        assert almost_equal(prod.norm(w), expected)
def test_inner():
    """Product-space inner product is the sum of the component inners."""
    base = odl.rn(2)
    x0 = base.element([1, 2])
    x1 = base.element([5, 3])
    y0 = base.element([2, 3])
    y1 = base.element([6, 4])

    prod = odl.ProductSpace(base, base)
    x = prod.element([x0, x1])
    y = prod.element([y0, y1])

    expected = base.inner(x0, y0) + base.inner(x1, y1)
    assert almost_equal(prod.inner(x, y), expected)
def test_vector_weighting(exponent):
    """Inner/norm/dist of a product space with per-component weighting."""
    r2 = odl.rn(2)
    r2x = r2.element([1, -1])
    r2y = r2.element([-2, 3])
    # inner = -5, dist = 5, norms = (sqrt(2), sqrt(13))
    r3 = odl.rn(3)
    r3x = r3.element([3, 4, 4])
    r3y = r3.element([1, -2, 1])
    # inner = -1, dist = 7, norms = (sqrt(41), sqrt(6))
    # Hand-computed component-wise reference values (see comments above)
    inners = [-5, -1]
    norms_x = [np.sqrt(2), np.sqrt(41)]
    dists = [5, 7]
    weight = [0.5, 1.5]
    pspace = odl.ProductSpace(r2, r3, weighting=weight, exponent=exponent)
    x = pspace.element((r2x, r3x))
    y = pspace.element((r2y, r3y))
    # The weighted inner product is only checked for exponent 2
    if exponent == 2.0:
        true_inner = np.sum(np.multiply(inners, weight))
        assert all_almost_equal(x.inner(y), true_inner)
    # For p = inf the weights enter linearly, otherwise as weight**(1/p)
    if exponent == float('inf'):
        true_norm_x = np.linalg.norm(
            np.multiply(norms_x, weight), ord=exponent)
    else:
        true_norm_x = np.linalg.norm(
            np.multiply(norms_x, np.power(weight, 1 / exponent)),
            ord=exponent)
    assert all_almost_equal(x.norm(), true_norm_x)
    # Same weighting rule applies to the distance
    if exponent == float('inf'):
        true_dist = np.linalg.norm(
            np.multiply(dists, weight), ord=exponent)
    else:
        true_dist = np.linalg.norm(
            np.multiply(dists, np.power(weight, 1 / exponent)),
            ord=exponent)
    assert all_almost_equal(x.dist(y), true_dist)
def test_const_weighting(exponent):
    """Inner/norm/dist of a product space with one constant weight."""
    r2 = odl.rn(2)
    r2x = r2.element([1, -1])
    r2y = r2.element([-2, 3])
    # inner = -5, dist = 5, norms = (sqrt(2), sqrt(13))
    r3 = odl.rn(3)
    r3x = r3.element([3, 4, 4])
    r3y = r3.element([1, -2, 1])
    # inner = -1, dist = 7, norms = (sqrt(41), sqrt(6))
    # Hand-computed component-wise reference values (see comments above)
    inners = [-5, -1]
    norms_x = [np.sqrt(2), np.sqrt(41)]
    dists = [5, 7]
    weight = 2.0
    pspace = odl.ProductSpace(r2, r3, weighting=weight, exponent=exponent)
    x = pspace.element((r2x, r3x))
    y = pspace.element((r2y, r3y))
    # The weighted inner product is only checked for exponent 2
    if exponent == 2.0:
        true_inner = weight * np.sum(inners)
        assert all_almost_equal(x.inner(y), true_inner)
    # For p = inf the weight enters linearly, otherwise as weight**(1/p)
    if exponent == float('inf'):
        true_norm_x = weight * np.linalg.norm(norms_x, ord=exponent)
    else:
        true_norm_x = (weight ** (1 / exponent) *
                       np.linalg.norm(norms_x, ord=exponent))
    assert all_almost_equal(x.norm(), true_norm_x)
    # Same weighting rule applies to the distance
    if exponent == float('inf'):
        true_dist = weight * np.linalg.norm(dists, ord=exponent)
    else:
        true_dist = (weight ** (1 / exponent) *
                     np.linalg.norm(dists, ord=exponent))
    assert all_almost_equal(x.dist(y), true_dist)
def custom_inner(x1, x2):
    """Inner product as the sum of the component-wise inner products."""
    pairwise = np.fromiter(
        (a.inner(b) for a, b in zip(x1.parts, x2.parts)),
        dtype=x1.space[0].dtype, count=len(x1))
    return x1.space.field.element(np.sum(pairwise))
def custom_norm(x):
    """Norm as the 1-norm of the vector of component norms."""
    component_norms = np.fromiter(
        (part.norm() for part in x.parts),
        dtype=x.space[0].dtype, count=len(x))
    return float(np.linalg.norm(component_norms, ord=1))
def custom_dist(x1, x2):
    """Distance as the 1-norm of the vector of component distances."""
    component_dists = np.fromiter(
        (a.dist(b) for a, b in zip(x1.parts, x2.parts)),
        dtype=x1.space[0].dtype, count=len(x1))
    return float(np.linalg.norm(component_dists, ord=1))
def test_custom_funcs():
    """Custom inner/norm/dist are accepted; invalid combinations raise."""
    # Checking the standard 1-norm and standard inner product, just to
    # see that the functions are handled correctly.
    r2 = odl.rn(2)
    r2x = r2.element([1, -1])
    r2y = r2.element([-2, 3])
    # inner = -5, dist = 5, norms = (sqrt(2), sqrt(13))
    r3 = odl.rn(3)
    r3x = r3.element([3, 4, 4])
    r3y = r3.element([1, -2, 1])
    # inner = -1, dist = 7, norms = (sqrt(41), sqrt(6))
    # custom_inner must agree with the default exponent-2 inner product
    pspace_2 = odl.ProductSpace(r2, r3, exponent=2.0)
    x = pspace_2.element((r2x, r3x))
    y = pspace_2.element((r2y, r3y))
    pspace_custom = odl.ProductSpace(r2, r3, inner=custom_inner)
    xc = pspace_custom.element((r2x, r3x))
    yc = pspace_custom.element((r2y, r3y))
    assert almost_equal(x.inner(y), xc.inner(yc))
    # custom_norm / custom_dist must agree with the exponent-1 space
    pspace_1 = odl.ProductSpace(r2, r3, exponent=1.0)
    x = pspace_1.element((r2x, r3x))
    y = pspace_1.element((r2y, r3y))
    pspace_custom = odl.ProductSpace(r2, r3, norm=custom_norm)
    xc = pspace_custom.element((r2x, r3x))
    assert almost_equal(x.norm(), xc.norm())
    pspace_custom = odl.ProductSpace(r2, r3, dist=custom_dist)
    xc = pspace_custom.element((r2x, r3x))
    yc = pspace_custom.element((r2y, r3y))
    assert almost_equal(x.dist(y), xc.dist(yc))
    # Conflicting or incomplete specifications must be rejected
    with pytest.raises(TypeError):
        odl.ProductSpace(r2, r3, a=1)  # extra keyword argument
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, norm=custom_norm, inner=custom_inner)
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, dist=custom_dist, inner=custom_inner)
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, norm=custom_norm, dist=custom_dist)
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, norm=custom_norm, exponent=1.0)
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, norm=custom_norm, weighting=2.0)
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, dist=custom_dist, weighting=2.0)
    with pytest.raises(ValueError):
        odl.ProductSpace(r2, r3, inner=custom_inner, weighting=2.0)
def test_power_RxR():
    """Power-space elements behave like explicit product elements."""
    base = odl.rn(2)
    power = odl.ProductSpace(base, 2)
    assert len(power) == 2

    part0 = base.element([1, 2])
    part1 = base.element([3, 4])
    from_parts = power.element([part0, part1])
    from_lists = power.element([[1, 2], [3, 4]])

    assert all_equal([part0, part1], from_parts)
    assert all_equal([part0, part1], from_lists)
def test_power_lincomb():
    """lincomb on a power space acts component-wise."""
    base = odl.rn(2)
    power = odl.ProductSpace(base, 2)

    x0 = base.element([1, 2])
    x1 = base.element([5, 3])
    y0 = base.element([-1, 7])
    y1 = base.element([2, 1])
    x = power.element([x0, x1])
    y = power.element([y0, y1])
    out = power.element()

    alpha, beta = 3.12, 1.23
    power.lincomb(alpha, x, beta, y, out=out)

    assert all_almost_equal(out, [alpha * x0 + beta * y0,
                                  alpha * x1 + beta * y1])
def test_power_in_place_modify():
    """Writing through a product element modifies the wrapped parts."""
    base = odl.rn(2)
    power = odl.ProductSpace(base, 2)

    x = power.element([base.element([1, 2]), base.element([5, 3])])
    y = power.element([base.element([-1, 7]), base.element([2, 1])])

    out0 = base.element()
    out1 = base.element()
    out = power.element([out0, out1])  # out is simply a wrapper of out0/out1

    power.lincomb(3.12, x, 1.23, y, out=out)

    # out0 and out1 must have been written in place
    assert all_almost_equal(out, [out0, out1])
def test_getitem_single():
    """Integer indexing returns component spaces; out of range raises."""
    r1 = odl.rn(1)
    r2 = odl.rn(2)
    H = odl.ProductSpace(r1, r2)

    assert H[-2] is r1
    assert H[-1] is r2
    assert H[0] is r1
    assert H[1] is r2

    # Fix: each out-of-range access needs its own `raises` block. In the
    # original both accesses shared one block, so H[2] was never executed
    # (pytest.raises exits after the first exception).
    with pytest.raises(IndexError):
        H[-3]
    with pytest.raises(IndexError):
        H[2]
def test_getitem_slice():
    """Slicing yields a product of the selected component spaces."""
    r1 = odl.rn(1)
    r2 = odl.rn(2)
    r3 = odl.rn(3)
    H = odl.ProductSpace(r1, r2, r3)

    head = H[:2]
    assert head == odl.ProductSpace(r1, r2)
    assert head[0] is r1
    assert head[1] is r2

    # slicing past the end gives an empty product space over the same field
    assert H[3:] == odl.ProductSpace(field=r1.field)
def test_getitem_fancy():
    """List indexing selects an arbitrary subset of component spaces."""
    r1 = odl.rn(1)
    r2 = odl.rn(2)
    r3 = odl.rn(3)
    H = odl.ProductSpace(r1, r2, r3)

    sub = H[[0, 2]]
    assert sub == odl.ProductSpace(r1, r3)
    assert sub[0] is r1
    assert sub[1] is r3
def test_element_equals():
    """Element equality compares all parts; inequality to foreign objects."""
    H = odl.ProductSpace(odl.rn(1), odl.rn(2))
    x = H.element([[0], [1, 2]])

    assert x != 0  # == is not trivially true against arbitrary objects
    assert x == x
    assert x == H.element([[0], [1, 2]])

    # a difference in either part breaks equality
    assert x != H.element([[3], [1, 2]])
    assert x != H.element([[0], [1, 3]])
def test_element_getitem_single():
    """Integer indexing returns the wrapped parts; out of range raises."""
    H = odl.ProductSpace(odl.rn(1), odl.rn(2))
    x0 = H[0].element([0])
    x1 = H[1].element([1, 2])
    x = H.element([x0, x1])

    assert x[-2] is x0
    assert x[-1] is x1
    assert x[0] is x0
    assert x[1] is x1

    # Fix: each out-of-range access needs its own `raises` block. In the
    # original both accesses shared one block, so x[2] was never executed
    # (pytest.raises exits after the first exception).
    with pytest.raises(IndexError):
        x[-3]
    with pytest.raises(IndexError):
        x[2]
def test_element_getitem_slice():
    """Slicing an element yields an element of the sliced space."""
    H = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3))
    parts = [H[0].element([0]),
             H[1].element([1, 2]),
             H[2].element([3, 4, 5])]
    x = H.element(parts)

    head = x[:2]
    assert head.space == H[:2]
    assert head[0] is parts[0]
    assert head[1] is parts[1]
def test_element_getitem_fancy():
    """Fancy indexing an element yields an element of the subspace."""
    H = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3))
    parts = [H[0].element([0]),
             H[1].element([1, 2]),
             H[2].element([3, 4, 5])]
    x = H.element(parts)

    sub = x[[0, 2]]
    assert sub.space == H[[0, 2]]
    assert sub[0] is parts[0]
    assert sub[1] is parts[2]
def test_element_setitem_single():
    """Test assignment of pspace parts with single indices."""
    pspace = odl.ProductSpace(odl.rn(1), odl.rn(2))
    x0 = pspace[0].element([0])
    x1 = pspace[1].element([1, 2])
    x = pspace.element([x0, x1])

    old_x0 = x[0]
    old_x1 = x[1]

    # Check that values are set, but identity is preserved
    new_x0 = pspace[0].element([1])
    x[-2] = new_x0
    assert x[-2] == new_x0
    assert x[-2] is old_x0

    new_x1 = pspace[1].element([3, 4])
    x[-1] = new_x1
    assert x[-1] == new_x1
    assert x[-1] is old_x1

    # Set values with scalars (broadcast over the component).
    x[1] = -1
    assert all_equal(x[1], [-1, -1])
    assert x[1] is old_x1

    # Check that out-of-bounds indices raise IndexError
    with pytest.raises(IndexError):
        x[-3] = x1

    with pytest.raises(IndexError):
        x[2] = x0
def test_element_setitem_slice():
    """Test assignment of pspace parts with slices."""
    pspace = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3))
    x0 = pspace[0].element([0])
    x1 = pspace[1].element([1, 2])
    x2 = pspace[2].element([3, 4, 5])
    x = pspace.element([x0, x1, x2])

    old_x0 = x[0]
    old_x1 = x[1]

    # Check that values are set, but identity is preserved
    new_x0 = pspace[0].element([6])
    new_x1 = pspace[1].element([7, 8])
    x[:2] = pspace[:2].element([new_x0, new_x1])
    assert x[:2][0] is old_x0
    assert x[:2][0] == new_x0
    assert x[:2][1] is old_x1
    assert x[:2][1] == new_x1

    # Set values with sequences of scalars (one scalar per part, broadcast).
    x[:2] = [-1, -2]
    assert x[:2][0] is old_x0
    assert all_equal(x[:2][0], [-1])
    assert x[:2][1] is old_x1
    assert all_equal(x[:2][1], [-2, -2])
def test_element_setitem_fancy():
    """Test assignment of pspace parts with lists."""
    pspace = odl.ProductSpace(odl.rn(1), odl.rn(2), odl.rn(3))
    x0 = pspace[0].element([0])
    x1 = pspace[1].element([1, 2])
    x2 = pspace[2].element([3, 4, 5])
    x = pspace.element([x0, x1, x2])

    old_x0 = x[0]
    old_x2 = x[2]

    # Check that values are set, but identity is preserved
    new_x0 = pspace[0].element([6])
    new_x2 = pspace[2].element([7, 8, 9])
    x[[0, 2]] = pspace[[0, 2]].element([new_x0, new_x2])
    assert x[[0, 2]][0] is old_x0
    assert x[[0, 2]][0] == new_x0
    assert x[[0, 2]][1] is old_x2
    assert x[[0, 2]][1] == new_x2

    # Set values with sequences of scalars (one scalar per selected part).
    x[[0, 2]] = [-1, -2]
    assert x[[0, 2]][0] is old_x0
    assert all_equal(x[[0, 2]][0], [-1])
    assert x[[0, 2]][1] is old_x2
    assert all_equal(x[[0, 2]][1], [-2, -2, -2])
def test_element_setitem_broadcast():
    """Test assignment of power space parts with broadcasting."""
    pspace = odl.ProductSpace(odl.rn(2), 3)
    x0 = pspace[0].element([0, 1])
    x1 = pspace[1].element([2, 3])
    x2 = pspace[2].element([4, 5])
    x = pspace.element([x0, x1, x2])

    old_x0 = x[0]
    old_x1 = x[1]

    # Set values with a single base space element, broadcast over the slice.
    new_x0 = pspace[0].element([4, 5])
    x[:2] = new_x0
    assert x[0] is old_x0
    assert x[0] == new_x0
    assert x[1] is old_x1
    assert x[1] == new_x0
def test_unary_ops():
    # Verify that the unary operators (`+x` and `-x`) work as expected
    space = odl.rn(3)
    pspace = odl.ProductSpace(space, 2)

    for op in [operator.pos, operator.neg]:
        x_arr, x = noise_elements(pspace)

        # Element result must match the NumPy result on the raw arrays,
        # and x itself must be unchanged by the unary op.
        y_arr = op(x_arr)
        y = op(x)

        assert all_almost_equal([x, y], [x_arr, y_arr])
def test_operators(arithmetic_op):
    # Test of the operators `+`, `-`, etc work as expected by numpy
    space = odl.rn(3)
    pspace = odl.ProductSpace(space, 2)

    # Interactions with scalars
    for scalar in [-31.2, -1, 0, 1, 2.13]:

        # Left op
        x_arr, x = noise_elements(pspace)
        if scalar == 0 and arithmetic_op in [operator.truediv,
                                             operator.itruediv]:
            # Check for correct zero division behaviour
            with pytest.raises(ZeroDivisionError):
                y = arithmetic_op(x, scalar)
        else:
            y_arr = arithmetic_op(x_arr, scalar)
            y = arithmetic_op(x, scalar)

            assert all_almost_equal([x, y], [x_arr, y_arr])

        # Right op
        x_arr, x = noise_elements(pspace)

        y_arr = arithmetic_op(scalar, x_arr)
        y = arithmetic_op(scalar, x)

        assert all_almost_equal([x, y], [x_arr, y_arr])

    # Verify that the statement z=op(x, y) gives equivalent results to NumPy
    x_arr, x = noise_elements(space, 1)
    y_arr, y = noise_elements(pspace, 1)

    # non-aliased left
    if arithmetic_op in [operator.iadd,
                         operator.isub,
                         operator.itruediv,
                         operator.imul]:
        # Check for correct error since in-place op is not possible here
        # (x lives in the base space, y in the product space).
        with pytest.raises(TypeError):
            z = arithmetic_op(x, y)
    else:
        z_arr = arithmetic_op(x_arr, y_arr)
        z = arithmetic_op(x, y)

        assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr])

    # non-aliased right
    z_arr = arithmetic_op(y_arr, x_arr)
    z = arithmetic_op(y, x)

    assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr])

    # aliased operation (both operands are the same object)
    z_arr = arithmetic_op(y_arr, y_arr)
    z = arithmetic_op(y, y)

    assert all_almost_equal([x, y, z], [x_arr, y_arr, z_arr])
def test_ufuncs():
    # Cannot use fixture due to bug in pytest
    H = odl.ProductSpace(odl.rn(1), odl.rn(2))

    # one arg
    x = H.element([[-1], [-2, -3]])

    z = x.ufuncs.absolute()
    assert all_almost_equal(z, [[1], [2, 3]])

    # one arg with out: the out element must be returned by identity
    x = H.element([[-1], [-2, -3]])
    y = H.element()

    z = x.ufuncs.absolute(out=y)
    assert y is z
    assert all_almost_equal(z, [[1], [2, 3]])

    # Two args
    x = H.element([[1], [2, 3]])
    y = H.element([[4], [5, 6]])
    w = H.element()

    z = x.ufuncs.add(y)
    assert all_almost_equal(z, [[5], [7, 9]])

    # Two args with out
    x = H.element([[1], [2, 3]])
    y = H.element([[4], [5, 6]])
    w = H.element()

    z = x.ufuncs.add(y, out=w)
    assert w is z
    assert all_almost_equal(z, [[5], [7, 9]])
def test_reductions():
    """Reductions operate over all components of a product space element."""
    H = odl.ProductSpace(odl.rn(1), odl.rn(2))
    x = H.element([[1], [2, 3]])
    assert x.ufuncs.sum() == 6.0
    assert x.ufuncs.prod() == 6.0
    assert x.ufuncs.min() == 1.0
    assert x.ufuncs.max() == 3.0
if __name__ == '__main__':
    # Forward slashes keep the path usable by pytest on Windows as well.
    pytest.main([str(__file__.replace('\\', '/')), '-v'])
| bgris/ODL_bgris | odl/test/space/pspace_test.py | Python | gpl-3.0 | 21,616 |
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
from simple_model.__version__ import __version__
# Directory containing this setup.py; used to locate packaged files.
here = os.path.abspath(os.path.dirname(__file__))

# Load the README for use as the package's long description on PyPI.
with open(os.path.join(here, 'README.rst')) as f:
    readme = '\n' + f.read()
class UploadCommand(Command):
    """Support setup.py publish."""

    description = 'Build and publish the package.'
    user_options = []  # no command-line options for this command

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        # Required by the distutils Command interface; nothing to set up.
        pass

    def finalize_options(self):
        # Required by the distutils Command interface; nothing to finalize.
        pass

    def run(self):
        """Build an sdist, upload it via twine, and push a version git tag."""
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except FileNotFoundError:
            # No previous build directory — nothing to clean.
            pass

        self.status('Building Source distribution…')
        os.system('{0} setup.py sdist'.format(sys.executable))

        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')

        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(__version__))
        os.system('git push --tags')

        sys.exit()
# Package metadata. Note that the distribution name ('pysimplemodel')
# differs from the import name ('simple_model').
setup(
    name='pysimplemodel',
    version=__version__,
    description='Data handling made easy',
    long_description='\n' + readme,
    url='https://github.com/lamenezes/simple-model',
    author='Luiz Menezes',
    author_email='luiz.menezesf@gmail.com',
    packages=find_packages(exclude=['tests']),
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries',
    ],
    cmdclass={
        'upload': UploadCommand,
    },
)
| lamenezes/simple-model | setup.py | Python | mit | 1,962 |
from SerialClient import *
| GeodesicCarbon/protopaja-sick | src/rosserial_python/src/rosserial_python/__init__.py | Python | mit | 27 |
# Source lists for building the 7zG (GUI) variant of p7zip.

# Low-level C sources: CRC, allocation, CPU detection and threading.
files_c = [
    'C/7zCrc.c',
    'C/7zCrcOpt.c',
    'C/Alloc.c',
    'C/CpuArch.c',
    'C/Threads.c',
]

# C++ sources: archive/stream plumbing, command-line UI common code,
# file-manager dialogs, GUI glue and the Windows-compatibility layer.
files_cpp = [
    'CPP/7zip/Archive/Common/OutStreamWithCRC.cpp',
    'CPP/7zip/Common/CreateCoder.cpp',
    'CPP/7zip/Common/FilePathAutoRename.cpp',
    'CPP/7zip/Common/FileStreams.cpp',
    'CPP/7zip/Common/FilterCoder.cpp',
    'CPP/7zip/Common/LimitedStreams.cpp',
    'CPP/7zip/Common/MethodProps.cpp',
    'CPP/7zip/Common/ProgressUtils.cpp',
    'CPP/7zip/Common/PropId.cpp',
    'CPP/7zip/Common/StreamObjects.cpp',
    'CPP/7zip/Common/StreamUtils.cpp',
    'CPP/7zip/Common/UniqBlocks.cpp',
    'CPP/7zip/Compress/CopyCoder.cpp',
    'CPP/7zip/UI/Common/ArchiveCommandLine.cpp',
    'CPP/7zip/UI/Common/ArchiveExtractCallback.cpp',
    'CPP/7zip/UI/Common/ArchiveOpenCallback.cpp',
    'CPP/7zip/UI/Common/Bench.cpp',
    'CPP/7zip/UI/Common/DefaultName.cpp',
    'CPP/7zip/UI/Common/EnumDirItems.cpp',
    'CPP/7zip/UI/Common/Extract.cpp',
    'CPP/7zip/UI/Common/ExtractingFilePath.cpp',
    'CPP/7zip/UI/Common/HashCalc.cpp',
    'CPP/7zip/UI/Common/LoadCodecs.cpp',
    'CPP/7zip/UI/Common/OpenArchive.cpp',
    'CPP/7zip/UI/Common/PropIDUtils.cpp',
    'CPP/7zip/UI/Common/SetProperties.cpp',
    'CPP/7zip/UI/Common/SortUtils.cpp',
    'CPP/7zip/UI/Common/TempFiles.cpp',
    'CPP/7zip/UI/Common/Update.cpp',
    'CPP/7zip/UI/Common/UpdateAction.cpp',
    'CPP/7zip/UI/Common/UpdateCallback.cpp',
    'CPP/7zip/UI/Common/UpdatePair.cpp',
    'CPP/7zip/UI/Common/UpdateProduce.cpp',
    'CPP/7zip/UI/Common/WorkDir.cpp',
    'CPP/7zip/UI/Common/ZipRegistry.cpp',
    'CPP/7zip/UI/Explorer/MyMessages.cpp',
    'CPP/7zip/UI/FileManager/ExtractCallback.cpp',
    'CPP/7zip/UI/FileManager/FormatUtils.cpp',
    'CPP/7zip/UI/FileManager/LangUtils.cpp',
    'CPP/7zip/UI/FileManager/OverwriteDialog.cpp',
    'CPP/7zip/UI/FileManager/OverwriteDialog_rc.cpp',
    'CPP/7zip/UI/FileManager/PasswordDialog.cpp',
    'CPP/7zip/UI/FileManager/PasswordDialog_rc.cpp',
    'CPP/7zip/UI/FileManager/ProgramLocation.cpp',
    'CPP/7zip/UI/FileManager/PropertyName.cpp',
    'CPP/7zip/UI/FileManager/ProgressDialog2.cpp',
    'CPP/7zip/UI/FileManager/ProgressDialog2_rc.cpp',
    'CPP/7zip/UI/FileManager/SplitUtils.cpp',
    'CPP/7zip/UI/FileManager/StringUtils.cpp',
    'CPP/7zip/UI/GUI/BenchmarkDialog.cpp',
    'CPP/7zip/UI/GUI/BenchmarkDialog_rc.cpp',
    'CPP/7zip/UI/GUI/CompressDialog.cpp',
    'CPP/7zip/UI/GUI/CompressDialog_rc.cpp',
    'CPP/7zip/UI/GUI/ExtractDialog.cpp',
    'CPP/7zip/UI/GUI/ExtractDialog_rc.cpp',
    'CPP/7zip/UI/GUI/ExtractGUI.cpp',
    'CPP/7zip/UI/GUI/GUI.cpp',
    'CPP/7zip/UI/GUI/HashGUI.cpp',
    'CPP/7zip/UI/GUI/UpdateCallbackGUI.cpp',
    'CPP/7zip/UI/GUI/UpdateCallbackGUI2.cpp',
    'CPP/7zip/UI/GUI/UpdateGUI.cpp',
    'CPP/7zip/UI/GUI/wxGUI.cpp',
    'CPP/Common/CRC.cpp',
    'CPP/Common/CommandLineParser.cpp',
    'CPP/Common/IntToString.cpp',
    'CPP/Common/Lang.cpp',
    'CPP/Common/ListFileUtils.cpp',
    'CPP/Common/MyString.cpp',
    'CPP/Common/MyVector.cpp',
    'CPP/Common/MyWindows.cpp',
    'CPP/Common/NewHandler.cpp',
    'CPP/Common/StringConvert.cpp',
    'CPP/Common/StringToInt.cpp',
    'CPP/Common/UTFConvert.cpp',
    'CPP/Common/Wildcard.cpp',
    'CPP/Windows/Control/Controls.cpp',
    'CPP/Windows/Control/Dialog.cpp',
    'CPP/Windows/DLL.cpp',
    'CPP/Windows/ErrorMsg.cpp',
    'CPP/Windows/FileDir.cpp',
    'CPP/Windows/FileFind.cpp',
    'CPP/Windows/FileIO.cpp',
    'CPP/Windows/FileName.cpp',
    'CPP/Windows/PropVariant.cpp',
    'CPP/Windows/PropVariantConv.cpp',
    'CPP/Windows/Registry.cpp',
    'CPP/Windows/Synchronization.cpp',
    'CPP/Windows/System.cpp',
    'CPP/Windows/TimeUtils.cpp',
    'CPP/Windows/Window.cpp',
    'CPP/myWindows/wine_GetXXXDefaultLangID.cpp',
    'CPP/myWindows/wine_date_and_time.cpp',
]
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import bs4 as bs
import urllib.request
from API_KEYS import EMAIL_ADDRESS, EMAIL_PASSWORD
# Look up this machine's public IP address.
page = urllib.request.urlopen('http://checkip.dyndns.com/').read()
parsed = bs.BeautifulSoup(page, 'lxml')

# Compose the notification email; sender and recipient are the same account.
fromaddr = EMAIL_ADDRESS
toaddr = EMAIL_ADDRESS

msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Public IP"
msg.attach(MIMEText(parsed.body.text, 'plain'))

# Authenticate against Gmail over STARTTLS and send the message.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, EMAIL_PASSWORD)
server.sendmail(fromaddr, toaddr, msg.as_string())
server.quit()
| inflatus/Python | Networking/check_ip_email.py | Python | mit | 759 |
import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    Fix: the original left the file handle open (relying on the garbage
    collector); a context manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata for the reviewday report generator.
setup(
    name="reviewday",
    version="0.2.0",
    author="Dan Prince",
    author_email="dan.prince@rackspace.com",
    description=("Report generator for OpenStack code reviews."),
    license="BSD",
    keywords="OpenStack HTML report generator",
    # Bug fix: the URL was missing ".com" in the GitHub hostname
    # ("https://github/dprince/reviewday"), which is not a valid host.
    url="https://github.com/dprince/reviewday",
    packages=['reviewday'],
    long_description=read('README.md'),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Utilities",
        "License :: OSI Approved :: BSD License",
    ],
    scripts=['bin/reviewday'],
    # Ship the report template and its static assets alongside the package.
    data_files=[
        ('reviewday', ['reviewday/report.html']),
        ('reviewday/report_files', [
            'reviewday/report_files/arrowBlank',
            'reviewday/report_files/arrowDown',
            'reviewday/report_files/arrowUp',
            'reviewday/report_files/combo.css',
            'reviewday/report_files/CRITICALBUGFIX.png',
            'reviewday/report_files/ESSENTIALFEATURE.png',
            'reviewday/report_files/HIGHBUGFIX.png',
            'reviewday/report_files/HIGHFEATURE.png',
            'reviewday/report_files/LOWBUGFIX.png',
            'reviewday/report_files/LOWFEATURE.png',
            'reviewday/report_files/MEDIUMBUGFIX.png',
            'reviewday/report_files/MEDIUMFEATURE.png',
            'reviewday/report_files/NOLINK.png',
            'reviewday/report_files/REGRESSIONHOTFIX.png',
            'reviewday/report_files/RELEASECRITICALBUG.png',
            'reviewday/report_files/sorting.js',
            'reviewday/report_files/UNDECIDEDBUGFIX.png',
            'reviewday/report_files/UNTARGETEDFEATURE.png',
            'reviewday/report_files/WISHLISTBUGFIX.png',
        ])
    ],
    install_requires=[
        "launchpadlib",
        "cheetah",
    ],
)
| openstack-infra/reviewday | setup.py | Python | mit | 1,838 |
def len2mask(len):
    """Convert a bit length to a dotted netmask (aka. CIDR to netmask).

    Prints a diagnostic and returns None for non-int or out-of-range input.
    Note: the parameter is named ``len`` for backward compatibility with
    existing playbooks, even though it shadows the builtin.
    """
    # Fix: the original used a Python 2 ``print`` statement, which is a
    # SyntaxError under Python 3 (required by modern Ansible).
    if not isinstance(len, int) or len < 0 or len > 32:
        print("Illegal subnet length: %s (which is a %s)" %
              (str(len), type(len).__name__))
        return None

    octets = []
    remaining = len
    for _ in range(4):
        if remaining > 7:
            octets.append('255')
        else:
            # 'remaining' high bits set in this octet (0 when exhausted).
            octets.append(str(255 - (2 ** (8 - remaining) - 1)))
        remaining -= 8
        if remaining < 0:
            remaining = 0
    return '.'.join(octets)
def mask2len(subnet):
    """Convert a dotted netmask to bit length (aka. netmask to CIDR).

    Counts set bits from the top of each octet and stops at the first
    zero bit, e.g. '255.255.255.0' -> 24.
    """
    octets = [int(x) for x in subnet.split(".")]
    count = 0
    for octet in octets:
        highest_bit = 128
        while highest_bit > 0:
            if octet >= highest_bit:
                count += 1
                octet -= highest_bit
                # Fix: use floor division; under Python 3 the original '/'
                # produced floats (128 -> 64.0 -> ... -> 0.5) and relied on
                # fractional comparisons to terminate.
                highest_bit //= 2
            else:
                return count
    return count
class FilterModule(object):
    ''' utility to convert cidr netmasks into len and reverse '''

    def filters(self):
        # Map the filter names usable in playbooks to their implementations.
        return {'mask2len': mask2len,
                'len2mask': len2mask}
| kili/playbooks | filter_plugins/netmask_conversion.py | Python | gpl-3.0 | 1,194 |
"""
This command exports a course from CMS to a git repository.
It takes as arguments the course id to export (e.g. MITx/999/2020) and
the repository to commit to. It takes username as an option for identifying
the commit, as well as a directory path to place the git repository.
By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned
directory. It is branch aware, but will reset all local changes to the
repository before attempting to export the XML, add, and commit changes if
any have taken place.
This functionality is also available as an export view in studio if the giturl
attribute is set and the FEATURE['ENABLE_EXPORT_GIT'] is set.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import contentstore.git_export_utils as git_export_utils
from contentstore.git_export_utils import GitExportError
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Take a course from studio and export it to a git repository.
    """

    option_list = BaseCommand.option_list + (
        make_option('--username', '-u', dest='user',
                    help=('Specify a username from LMS/Studio to be used '
                          'as the commit author.')),
        make_option('--repo_dir', '-r', dest='repo',
                    help='Specify existing git repo directory.'),
    )

    help = _('Take the specified course and attempt to '
             'export it to a git repository\n. Course directory '
             'must already be a git repository. Usage: '
             ' git_export <course_loc> <git_url>')

    def handle(self, *args, **options):
        """
        Checks arguments and runs export function if they are good
        """
        if len(args) != 2:
            raise CommandError('This script requires exactly two arguments: '
                               'course_loc and git_url')

        # Rethrow GitExportError as CommandError for SystemExit
        try:
            course_key = CourseKey.from_string(args[0])
        except InvalidKeyError:
            # Fall back to the deprecated slash-separated key format.
            try:
                course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
            except InvalidKeyError:
                raise CommandError(unicode(GitExportError.BAD_COURSE))

        try:
            git_export_utils.export_to_git(
                course_key,
                args[1],
                options.get('user', ''),
                # Bug fix: the --repo_dir option stores into 'repo' (see
                # option_list above), but this previously read the
                # non-existent key 'rdir', so the flag was silently ignored.
                options.get('repo', None)
            )
        except git_export_utils.GitExportError as ex:
            raise CommandError(unicode(ex.message))
| fintech-circle/edx-platform | cms/djangoapps/contentstore/management/commands/git_export.py | Python | agpl-3.0 | 2,816 |
# Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
class Vector:
    """Represent a vector in a multidimensional space."""

    def __init__(self, d):
        """Initialize from a dimension (int) or an iterable of coordinates.

        Raises TypeError if ``d`` is neither an int nor an iterable.
        """
        if isinstance(d, int):
            self._coords = [0] * d
        else:
            try:                                  # we test if param is iterable
                self._coords = [val for val in d]
            except TypeError:
                raise TypeError('invalid parameter type')

    def __len__(self):
        """Return the dimension of the vector."""
        return len(self._coords)

    def __getitem__(self, j):
        """Return jth coordinate of vector."""
        return self._coords[j]

    def __setitem__(self, j, val):
        """Set jth coordinate of vector to given value."""
        self._coords[j] = val

    def __add__(self, other):
        """Return sum of two vectors.

        Raises ValueError if the dimensions disagree.
        """
        if len(self) != len(other):               # relies on __len__ method
            raise ValueError('dimensions must agree')
        result = Vector(len(self))                # start with vector of zeros
        for j in range(len(self)):
            result[j] = self[j] + other[j]
        return result

    def __eq__(self, other):
        """Return True if vector has same coordinates as other."""
        # Fix: comparing against a non-Vector used to raise AttributeError;
        # returning NotImplemented lets Python fall back to its default
        # handling, so e.g. ``v == 3`` is simply False.
        if not isinstance(other, Vector):
            return NotImplemented
        return self._coords == other._coords

    def __ne__(self, other):
        """Return True if vector differs from other."""
        return not self == other                  # rely on __eq__ definition

    def __str__(self):
        """Produce string representation of vector."""
        return '<' + str(self._coords)[1:-1] + '>'  # adapt list representation

    def __neg__(self):
        """Return copy of vector with all coordinates negated."""
        result = Vector(len(self))                # start with vector of zeros
        for j in range(len(self)):
            result[j] = -self[j]
        return result

    def __lt__(self, other):
        """Compare vectors based on lexicographical order."""
        if not isinstance(other, Vector):
            return NotImplemented                 # same fix as __eq__
        if len(self) != len(other):
            raise ValueError('dimensions must agree')
        return self._coords < other._coords

    def __le__(self, other):
        """Compare vectors based on lexicographical order."""
        if not isinstance(other, Vector):
            return NotImplemented                 # same fix as __eq__
        if len(self) != len(other):
            raise ValueError('dimensions must agree')
        return self._coords <= other._coords
if __name__ == '__main__':
    # the following demonstrates usage of a few methods
    v = Vector(5)            # construct five-dimensional <0, 0, 0, 0, 0>
    v[1] = 23                # <0, 23, 0, 0, 0> (based on use of __setitem__)
    v[-1] = 45               # <0, 23, 0, 0, 45> (also via __setitem__)
    print(v[4])              # print 45 (via __getitem__)
    u = v + v                # <0, 46, 0, 0, 90> (via __add__)
    print(u)                 # print <0, 46, 0, 0, 90>
    total = 0
    for entry in v:          # implicit iteration via __len__ and __getitem__
        total += entry
| consultit/Ely | ely/direct/data_structures_and_algorithms/ch02/vector.py | Python | lgpl-3.0 | 3,819 |
# coding=utf-8
import random
import re
import IrcClient
import worddb
import imp
import sys
def HandlerFunc(client, event, prefix, command, params, trailing):
    """Respond to channel messages of the form "Talk about yourself ... <botnick>".

    Ignores private messages (params == own nick) and users on the bot's
    ignore list; otherwise greets the sender and introduces the bot.
    """
    if params != client.nick:
        if command == "PRIVMSG":
            request = re.compile(r'Talk about yourself,{0,1}\s+.*' + client.nick + '.*')
            match = re.search(request, trailing)
            if match is not None:
                # Extract the sender's nick from the "nick!user@host" prefix.
                # Fix: this was a ``ur'...'`` literal, which is a SyntaxError
                # on Python 3; a plain raw string is equivalent here.
                nick_pattern = re.compile(r'([\w]*)![\w@.-]*')
                match = re.search(nick_pattern, prefix)
                if match is not None:
                    sender = match.group(1)
                    if client.IsIgnored(sender) != True:
                        client.SendMessage(params, random.choice(worddb.greetings)+" "+sender+"!")
                        client.SendMessage(params, "I am a dragon! My creator is mkalte, I'm mostly friendly and if I like you I wont burn you :3")
                        client.SendMessage(params, "My DNA is not really good documented, but you can find it here: https://github.com/mkalte666/Dragonflame/ !")
from Screens.Screen import Screen
from Components.Sources.CanvasSource import CanvasSource
from Components.ActionMap import ActionMap
from enigma import gFont
from enigma import RT_HALIGN_RIGHT, RT_WRAP
def RGB(r,g,b):
return (r<<16)|(g<<8)|b
class VideoFinetune(Screen):
    """Full-screen test-pattern generator for tuning a TV's picture settings.

    Each ``testpic_*`` method paints one pattern onto the Canvas source.
    Number keys switch patterns; OK advances to the pattern queued in
    ``self.next``. All geometry is hard-coded to a 720x576 canvas.
    """

    skin = """
	<screen position="0,0" size="720,576">
	<widget source="Canvas" render="Canvas" position="0,0" size="720,576" />
	</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)

        self["Canvas"] = CanvasSource()

        # The eight basic primary/secondary colors used by several patterns.
        self.basic_colors = [RGB(255, 255, 255), RGB(255, 255, 0), RGB(0, 255, 255), RGB(0, 255, 0), RGB(255, 0, 255), RGB(255, 0, 0), RGB(0, 0, 255), RGB(0, 0, 0)]

        self["actions"] = ActionMap(["InputActions", "OkCancelActions"],
        {
            "1": self.testpic_brightness,
            "2": self.testpic_contrast,
            # "3": self.testpic_colors,
            "3": self.testpic_filter,
            "4": self.testpic_gamma,
            "5": self.testpic_fubk,
            "ok": self.callNext,
            "cancel": self.close,
        })

        self.testpic_brightness()

    def callNext(self):
        # Advance to the pattern queued by the current one (if any).
        if self.next:
            self.next()

    def bbox(self, x, y, width, height, col, xx, yy):
        """Draw small corner markers (xx by yy) around the given rectangle."""
        c = self["Canvas"]
        c.fill(x, y, xx, yy, col)
        c.fill(x + width - xx, y, xx, yy, col)
        c.fill(x, y + height - yy, xx, yy, col)
        c.fill(x + width - xx, y + height - yy, xx, yy, col)

    def testpic_brightness(self):
        """Vertical gray staircase (0..116) for adjusting brightness."""
        self.next = self.testpic_contrast
        c = self["Canvas"]

        xres, yres = 720, 576

        bbw, bbh = xres / 192, yres / 192
        c.fill(0, 0, xres, yres, RGB(0,0,0))

        # for i in range(8):
        #     col = (7-i) * 255 / 7
        #     width = xres - xres/5
        #     ew = width / 15
        #     offset = xres/10 + ew * i
        #     y = yres * 2 / 3
        #     height = yres / 6
        #
        #     c.fill(offset, y, ew, height, RGB(col, col, col))
        #
        #     if col == 0 or col == 16 or col == 116:
        #         self.bbox(offset, y, ew, height, RGB(255,255,255), bbw, bbh)

        for i in range(15):
            # 15 shades from black up to dark gray (116).
            col = i * 116 / 14

            height = yres / 3
            eh = height / 8
            offset = yres/6 + eh * i
            x = xres * 2 / 3
            width = yres / 6

            c.fill(x, offset, width, eh, RGB(col, col, col))

            # Mark the reference shades with a thin white line.
            if col == 0 or col == 16 or col == 116:
                c.fill(x, offset, width, 2, RGB(255, 255, 255))

            # if col == 0 or col == 36:
            #     self.bbox(x, offset, width, eh, RGB(255,255,255), bbw, bbh)

            # Number the two darkest steps, which must stay distinguishable.
            if i < 2:
                c.writeText(x + width, offset, width, eh, RGB(255, 255, 255), RGB(0,0,0), gFont("Regular", 20), "%d." % (i+1))

        c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,255,255), RGB(0,0,0), gFont("Regular", 40),
            _("Brightness"))

        c.writeText(xres / 10, yres / 6, xres * 4 / 7, yres / 6, RGB(255,255,255), RGB(0,0,0), gFont("Regular", 20),
            _("If your TV has a brightness or contrast enhancement, disable it. If there is something called \"dynamic\", "
                "set it to standard. Adjust the backlight level to a value suiting your taste. "
                "Turn down contrast on your TV as much as possible.\nThen turn the brightness setting as "
                "low as possible, but make sure that the two lowermost shades of gray stay distinguishable.\n"
                "Do not care about the bright shades now. They will be set up in the next step.\n"
                "If you are happy with the result, press OK."),
            RT_WRAP)

        c.flush()

    def testpic_contrast(self):
        """Bright gray staircase (185..255) for adjusting contrast."""
        # self.next = self.testpic_colors
        self.next = self.close
        c = self["Canvas"]

        xres, yres = 720, 576

        bbw, bbh = xres / 192, yres / 192
        c.fill(0, 0, xres, yres, RGB(0,0,0))

        bbw = xres / 192
        bbh = yres / 192
        c.fill(0, 0, xres, yres, RGB(255,255,255))

        # for i in range(15):
        #     col = 185 + i * 5
        #     width = xres - xres/5
        #     ew = width / 15
        #     offset = xres/10 + ew * i
        #     y = yres * 2 / 3
        #     height = yres / 6
        #
        #     c.fill(offset, y, ew, height, RGB(col, col, col))
        #
        #     if col == 185 or col == 235 or col == 255:
        #         self.bbox(offset, y, ew, height, RGB(0,0,0), bbw, bbh)

        for i in range(15):
            # col = (7-i) * 255 / 7
            # 15 shades from light gray (185) up to white (255).
            col = 185 + i * 5

            height = yres / 3
            eh = height / 8
            offset = yres/6 + eh * i
            x = xres * 2 / 3
            width = yres / 6

            c.fill(x, offset, width, eh, RGB(col, col, col))

            # if col == 0 or col == 36:
            #     self.bbox(x, offset, width, eh, RGB(255,255,255), bbw, bbh);
            # if col == 255:
            #     self.bbox(x, offset, width, eh, RGB(0,0,0), bbw, bbh);

            # Mark the reference shades with a thin black line.
            if col == 185 or col == 235 or col == 255:
                c.fill(x, offset, width, 2, RGB(0,0,0))

            # Number the two brightest steps, which must stay distinguishable.
            if i >= 13:
                c.writeText(x + width, offset, width, eh, RGB(0, 0, 0), RGB(255, 255, 255), gFont("Regular", 20), "%d." % (i-13+1))

        c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,0,0), RGB(255,255,255), gFont("Regular", 40),
            _("Contrast"))

        c.writeText(xres / 10, yres / 6, xres / 2, yres / 6, RGB(0,0,0), RGB(255,255,255), gFont("Regular", 20),
            _("Now, use the contrast setting to turn up the brightness of the background as much as possible, "
                "but make sure that you can still see the difference between the two brightest levels of shades."
                "If you have done that, press OK."),
            RT_WRAP)

        c.flush()

    def testpic_colors(self):
        """Color bars and gray ramps for adjusting color saturation."""
        self.next = self.close
        c = self["Canvas"]

        xres, yres = 720, 576

        bbw = xres / 192
        bbh = yres / 192
        c.fill(0, 0, xres, yres, RGB(255,255,255))

        for i in range(33):
            col = i * 255 / 32;
            width = xres - xres/5;
            ew = width / 33;
            offset = xres/10 + ew * i;
            y = yres * 2 / 3;
            height = yres / 20;
            o = yres / 60;

            # Ramp from full red/green/blue down to white (first half)
            # and on down to the pure primary (second half).
            if i < 16:
                c1 = 0xFF;
                c2 = 0xFF - (0xFF * i / 16);
            else:
                c1 = 0xFF - (0xFF * (i - 16) / 16);
                c2 = 0;

            c.fill(offset, y, ew, height, RGB(c1, c2, c2))
            c.fill(offset, y + (height + o) * 1, ew, height, RGB(c2, c1, c2))
            c.fill(offset, y + (height + o) * 2, ew, height, RGB(c2, c2, c1))
            c.fill(offset, y + (height + o) * 3, ew, height, RGB(col, col, col))

            if i == 0:
                self.bbox(offset, y, ew, height, RGB(0,0,0), bbw, bbh);
                self.bbox(offset, y + (height + o) * 1, ew, height, RGB(0,0,0), bbw, bbh);
                self.bbox(offset, y + (height + o) * 2, ew, height, RGB(0,0,0), bbw, bbh);

        # Column of the eight basic colors on the right.
        for i in range(8):
            height = yres / 3;
            eh = height / 8;
            offset = yres/6 + eh * i;
            x = xres * 2 / 3;
            width = yres / 6;

            c.fill(x, offset, width, eh, self.basic_colors[i])
            if i == 0:
                self.bbox(x, offset, width, eh, RGB(0,0,0), bbw, bbh)

        # NOTE(review): probably meant _("Color") so the title is
        # translated like the other patterns — confirm.
        c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,0,0), RGB(255,255,255), gFont("Regular", 40),
            ("Color"))

        c.writeText(xres / 10, yres / 6, xres / 2, yres / 6, RGB(0,0,0), RGB(255,255,255), gFont("Regular", 20),
            _("Adjust the color settings so that all the color shades are distinguishable, but appear as saturated as possible. "
                "If you are happy with the result, press OK to close the video fine-tuning, or use the number keys to select other test screens."),
            RT_WRAP)

        c.flush()

    def testpic_filter(self):
        """Vertical line gratings of decreasing frequency (sharpness check)."""
        c = self["Canvas"]

        xres, yres = 720, 576

        c.fill(0, 0, xres, yres, RGB(64, 64, 64))

        width = xres - xres/5
        offset = xres/10
        yb = yres * 2 / 3
        height = yres / 20
        o = yres / 60
        border = xres / 60

        g1 = 255
        g2 = 128

        # Light frame behind the three gratings.
        c.fill(offset - border, yb - border, border * 2 + width, border * 2 + (height * 3 + o * 2), RGB(g1, g1, g1))

        # 1-, 2- and 4-pixel wide line gratings.
        for x in xrange(0, width, 2):
            c.fill(offset + x, yb, 1, height, RGB(g2,g2,g2))

        for x in xrange(0, width, 4):
            c.fill(offset + x, yb + (o + height), 2, height, RGB(g2,g2,g2))

        for x in xrange(0, width, 8):
            c.fill(offset + x, yb + (o + height) * 2, 4, height, RGB(g2,g2,g2))

        c.flush()

    def testpic_gamma(self):
        """Dithered 50% gray next to solid patches for gamma estimation."""
        self.next = None
        c = self["Canvas"]

        xres, yres = 720, 576

        c.fill(0, 0, xres, yres, RGB(0, 0, 0))

        width = xres - xres/5
        offset_x = xres/10

        height = yres - yres/5
        offset_y = yres/10

        # Left half: alternating white/black lines average to 50% luminance.
        for y in xrange(0, height, 4):
            c.fill(offset_x, offset_y + y, width/2, 2, RGB(255,255,255))

        l = 0
        fnt = gFont("Regular", height / 14)
        import math
        # Right half: solid grays computed for gammas 0.8 .. 3.4; the patch
        # matching the dithered half indicates the display's gamma.
        for i in xrange(1, 15):
            y = i * height / 14
            h = y - l
            gamma = 0.6 + i * 0.2
            col = int(math.pow(.5, 1.0/gamma) * 256.0)
            c.fill(offset_x + width/2, offset_y + l, width/2, h, RGB(col,col,col))
            c.writeText(offset_x + width/2, offset_y + l, width/2, h, RGB(0,0,0), RGB(col,col,col), fnt, "%1.2f" % gamma, RT_WRAP|RT_HALIGN_RIGHT)
            l = y

        c.flush()

    def testpic_fubk(self):
        """FuBK-style test pattern: grid, color bars, gray ramp, gratings."""
        self.next = None

        # TODO:
        # this test currently only works for 4:3 aspect.
        # also it's hardcoded to 720,576
        c = self["Canvas"]

        xres, yres = 720, 576

        c.fill(0, 0, xres, yres, RGB(128, 128, 128))

        # White convergence grid.
        for x in xrange(6, xres, 44):
            c.fill(x, 0, 3, yres, RGB(255,255,255))

        for y in xrange(34, yres, 44):
            c.fill(0, y, xres, 3, RGB(255,255,255))

        # Color bars and gray staircase.
        for i in range(8):
            c.fill(140+i*55, 80, 55, 80, self.basic_colors[i])
            g = i * 255 / 7
            c.fill(140+i*55, 160, 55, 80, RGB(g,g,g))

        # Alternating black/white bars of increasing frequency.
        x = 0
        phase = 0

        while x < 440:
            freq = (440 - x) / 44 + 1
            if phase:
                col = RGB(255,255,255)
            else:
                col = RGB(0,0,0)
            c.fill(140+x, 320, freq, 160, col)
            x += freq
            phase = not phase

        c.flush()
| kakunbsc/enigma2 | lib/python/Plugins/SystemPlugins/VideoTune/VideoFinetune.py | Python | gpl-2.0 | 8,884 |
import os
import sys
import pytest
class SomeClass:
    # Fixture for the search tests below. The duplicated names (the nested
    # ``SomeClass`` and the two ``twice`` methods) are deliberate so that
    # searches can distinguish scopes — do not "fix" the shadowing.
    class SomeClass:
        def twice(self, a):
            something = os
            return something

    def twice(self, b):
        pass
def some_function():
    # Empty fixture function referenced by the search tests below.
    pass
@pytest.mark.parametrize(
    'string, descriptions, kwargs', [
        # No completions
        ('SomeClass', ['class SomeClass'], {}),
        ('SomeClass', ['class SomeClass', 'class SomeClass.SomeClass'], dict(all_scopes=True)),
        ('Some', [], dict(all_scopes=True)),
        ('os', ['module os'], {}),
        ('sys', ['module sys'], {}),
        ('sys.path', ['statement sys.path'], {}),
        ('sys.exit', ['function sys.exit'], {}),
        ('something', [], {}),
        ('something', ['statement SomeClass.SomeClass.twice.something'], dict(all_scopes=True)),

        # Completions
        ('class Some', ['class SomeClass', 'class SomeClass.SomeClass'],
         dict(all_scopes=True, complete=True)),
        ('class Some', ['class SomeClass'], dict(complete=True)),
        ('Some', ['class SomeClass', 'class SomeClass.SomeClass',
                  'statement SomeClass.SomeClass.twice.something',
                  'function SomeClass.some_function'], dict(all_scopes=True, complete=True)),
        ('some', ['class SomeClass', 'class SomeClass.SomeClass',
                  'statement SomeClass.SomeClass.twice.something',
                  'function SomeClass.some_function'], dict(all_scopes=True, complete=True)),

        # Fuzzy
        ('class Smelss', ['class SomeClass'], dict(complete=True, fuzzy=True)),
        ('class Smelss', ['class SomeClass', 'class SomeClass.SomeClass'],
         dict(complete=True, fuzzy=True, all_scopes=True)),

        # Nested
        ('SomeClass.SomeClass', ['class SomeClass.SomeClass'],
         dict(all_scopes=True)),
        ('SomeClass.SomeClass.twice', ['function SomeClass.SomeClass.twice'],
         dict(all_scopes=True)),
        ('SomeClass.SomeClass.twice.__call__', ['function types.FunctionType.__call__'],
         dict(all_scopes=True)),
        ('SomeClass.SomeClass.twice.something', [], dict(all_scopes=True)),
        ('SomeClass.twice', ['function SomeClass.twice', 'function SomeClass.SomeClass.twice'],
         dict(all_scopes=True)),

        # Nested completions
        ('SomeClass.twi', ['function SomeClass.twice', 'function SomeClass.SomeClass.twice'],
         dict(all_scopes=True, complete=True)),

        # Fuzzy unfortunately doesn't work
        ('SomeCl.twice', [], dict(all_scopes=True, complete=True, fuzzy=True)),
    ]
)
def test_simple_search(Script, string, descriptions, kwargs):
    """Search/complete_search on this file yields the expected definitions."""
    # `complete` in kwargs selects complete_search instead of plain search.
    if kwargs.pop('complete', False) is True:
        defs = Script(path=__file__).complete_search(string, **kwargs)
    else:
        defs = Script(path=__file__).search(string, **kwargs)
    # Compare against "<type> <full_name>" with this module's prefix stripped.
    this_mod = 'test.test_api.test_search.'
    assert [d.type + ' ' + d.full_name.replace(this_mod, '') for d in defs] == descriptions
@pytest.mark.parametrize(
    'string, completions, fuzzy, all_scopes', [
        ('SomeCl', ['ass'], False, False),
        # Fuzzy matches have no meaningful completion suffix, hence None.
        ('SomeCl', [None], True, False),
        ('twic', [], False, False),
        ('some_f', [], False, False),
        ('twic', ['e', 'e'], False, True),
        ('some_f', ['unction'], False, True),
    ]
)
def test_complete_search(Script, string, completions, fuzzy, all_scopes):
    """complete_search returns the expected completion suffixes."""
    defs = Script(path=__file__).complete_search(string, fuzzy=fuzzy, all_scopes=all_scopes)
    assert [d.complete for d in defs] == completions
| snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/jedi/test/test_api/test_search.py | Python | gpl-3.0 | 3,506 |
import subprocess
def run(cmd, max_minutes = 6000):
    """Run *cmd* through the shell and collect its output.

    Polls the child until it exits or roughly ``max_minutes`` minutes have
    elapsed, then drains stdout/stderr.

    Returns:
        (stdout, stderr, exit_code) -- exit_code is the real process return
        code, or None if the process was still running when the time limit
        was hit.

    Fixes vs. the original:
      * exit code was taken from ``file.close()`` (which always returns
        None) instead of the process return code.
      * the unconditional blocking ``read()`` calls inside the poll loop
        stalled until EOF, making the timeout ineffective; output is now
        drained once, after the process has finished.
      * stdout was no longer closed twice.
    """
    import time
    sys.stderr.write("Running cmd: %s\n" % cmd)
    p = subprocess.Popen(cmd,
                         shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    deadline = 60 * max_minutes
    waited = 0.0
    while waited < deadline and p.poll() is None:
        time.sleep(0.1)  # finer granularity than the old 1 s tick
        waited += 0.1
    # NOTE(review): a child producing more output than the pipe buffer can
    # block before exiting; a communicate()-based rewrite would be needed to
    # handle that case fully.
    out = p.stdout.read()
    err = p.stderr.read()
    p.stdin.close()
    p.stdout.close()
    p.stderr.close()
    return (out, err, p.poll())
if __name__ == "__main__":
import sys, os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--outprefix", dest="outprefix", default = 'leafcutter',
help="Output prefix (default leafcutter)")
parser.add_option("-t", "--tempdir", dest="tmpdir", default='./tmp/',
help="Where to output files (default ./)")
parser.add_option("-d", "--leafdir", dest="leafd", default='./',
help="Top-level LeafCutter directory")
parser.add_option("-l", "--maxintronlen", dest="maxintronlen", default = 100000,
help="Maximum intron length in bp (default 100,000bp)")
parser.add_option("-m", "--minclureads", dest="minclureads", default = 30,
help="Minimum reads in a cluster (default 30 reads)")
parser.add_option("-p", "--mincluratio", dest="mincluratio", default = 0.001,
help="Minimum fraction of reads in a cluster that support a junction (default 0.001)")
parser.add_option("-a", "--annot", dest="annotation", default = None,
help="[optional] Path of annotation GTF file e.g. ~/tools/leafcutter/clustering/gencode.v19.annotation.gtf.gz")
parser.add_option("-b", "--bams", dest="bam",
help="Text file listing bam files to quantify")
(options, args) = parser.parse_args()
try: open(options.leafd+"/clustering/leafcutter_cluster.py")
except:
sys.stderr.write("Please specify correct LeafCutter directory e.g. -d tools/leafcutter/.\n")
exit(0)
if options.bam == None:
sys.stderr.write("Error: no bam file provided...\n")
exit(0)
bams = open(options.bam).readlines()
# create tmp file directory
try: os.mkdir(options.tmpdir)
except: pass
# (should check if samtools are installed)
sys.stderr.write("processing bam files...\n")
fout = file("%s/junction_files.txt"%options.tmpdir,'w')
for bam in bams:
bam = bam.strip()
bedfile = "%s/%s.bed"%(options.tmpdir,bam.split('/')[-1])
juncfile = "%s/%s.junc"%(options.tmpdir,bam.split('/')[-1])
fout.write(juncfile+'\n')
try: open(juncfile)
except: pass
else:
sys.stderr.write("%s exists..skipping\n"%juncfile)
continue
print run("samtools view %s | python %s/scripts/filter_cs.py | %s/scripts/sam2bed.pl --use-RNA-strand - %s"%(bam, options.leafd, options.leafd,bedfile))[1]
print run("%s/scripts/bed2junc.pl %s %s; rm %s"%(options.leafd,bedfile,juncfile, bedfile))[1]
fout.close()
print run("python %s/clustering/leafcutter_cluster.py -j %s/junction_files.txt -m %s -o %s -l %s -r %s -p %s"%(options.leafd,options.tmpdir,options.minclureads, options.outprefix,str(options.maxintronlen), options.tmpdir,str(options.mincluratio)))[1]
if options.annotation != None:
print run("python %s/clustering/get_cluster_gene.py %s %s/%s_perind.counts.gz"%(options.leafd,options.annotation, options.tmpdir,options.outprefix))[1]
pass
print run("python %s/scripts/prepare_phenotype_table.py %s/%s_perind.counts.gz"%(options.leafd,options.tmpdir,options.outprefix))
sys.stdout.write("\n*******fastQTL instructions (also see http://fastqtl.sourceforge.net/) *******\n")
sys.stdout.write("\n(1) Prepare phenotypes: Use `sh %s/%s_perind.counts.gz_prepare.sh' to create index for fastQTL (requires tabix and bgzip).\n"%(options.tmpdir,options.outprefix))
sys.stdout.write("(2) Prepare covariates: To take the top 5 PCs, use `head -6 %s/%s_perind.counts.gz.PCs > %s/%s_perind.counts.gz.PCs.PC5\n"%(options.tmpdir,options.outprefix,options.tmpdir,options.outprefix))
sys.stdout.write("(3) Prepare genotypes: bgzip + tabix your genotype (VCF) file > SNPs.MAF05.txt.gen.gz (not included)\n")
sys.stdout.write("(4) Run fastQTL: Use e.g. `FastQTL/bin/fastQTL.static --vcf SNPs.MAF05.txt.gen.gz --bed %s/%s_perind.counts.gz.qqnorm_chr1.gz -out %s/%s_output_chr1 --chunk 1 1 --window 1e5 --cov %s/%s_perind.counts.gz.PCs.PC5\n\n\n"%(options.tmpdir,options.outprefix,options.tmpdir,options.outprefix,options.tmpdir,options.outprefix))
| davidaknowles/leafcutter | example_data/run_sQTL.py | Python | apache-2.0 | 5,156 |
from __future__ import unicode_literals
from zope.interface import Interface
class IMapping(Interface):
    """Interface for objects that map an event to its handler method name."""
    # NOTE(review): "getHadler" looks like a typo of "getHandler", but it is
    # part of this public interface -- renaming would break every implementer
    # and caller, so it is only flagged here. TODO confirm and deprecate.
    def getHadler(event):
        """
        Return the name of the method that handles *event*.
        """
class IEventFactory(Interface):
    """Interface for factories that build events from raw input data."""
    def buildEvent(data):
        """
        Return an event instance initialized with *data*.
        """
    def eventSize():
        """
        Return the size of one event, in bytes.
        """
| buben19/twistedinput | twistedinput/interfaces.py | Python | unlicense | 424 |
from onlineticket.generated.ticket import Ticket
from onlineticket.section import SectionParser
class TicketParser:
    """Parse an online-ticket file into a plain nested dict."""
    def parse(self, filename):
        """Read *filename* with the Kaitai parser and map it to a dict."""
        raw = self._parse_kaitai(filename)
        return self._map(raw)
    def _parse_kaitai(self, filename):
        # Thin wrapper so the Kaitai dependency is easy to stub in tests.
        return Ticket.from_file(filename)
    def _map(self, parsed):
        # Header fields come straight off the parsed struct; the signature is
        # rendered as a hex string.
        header = {
            'version': parsed.version,
            'issuer': parsed.issuer,
            'signature_key_id': parsed.key_id,
            'signature': parsed.signature.hex(),
            'payload_size': parsed.payload_size,
        }
        return {'header': header, 'payload': self._map_payload(parsed)}
    def _map_payload(self, parsed):
        # One parsed dict per payload section.
        section_parser = SectionParser()
        return [section_parser.parse(section) for section in parsed.payload.section]
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model-configuration dictionary; sub-experiments override entries via
# updateConfigFromSubConfig() below.
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': {   'days': 0,
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0,
        # Each field is aggregated by taking the first value in the window.
        'fields': [(u'c1', 'first'), (u'c0', 'first')],
    },
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : VERBOSITY,
            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            # c0 is the timestamp field (three date sub-encoders); c1 is the
            # predicted scalar field (see inferenceArgs in the control section).
            'encoders': {   u'c0_timeOfDay': {   'fieldname': u'c0',
                                     'name': u'c0_timeOfDay',
                                     'timeOfDay': (21, 1),
                                     'type': 'DateEncoder'},
                u'c0_dayOfWeek': {   'dayOfWeek': (21, 1),
                                     'fieldname': u'c0',
                                     'name': u'c0_dayOfWeek',
                                     'type': 'DateEncoder'},
                u'c0_weekend': {   'fieldname': u'c0',
                                   'name': u'c0_weekend',
                                   'type': 'DateEncoder',
                                   'weekend': 21},
                u'c1': {   'clipInput': True,
                           'fieldname': u'c1',
                           'n': 100,
                           'name': u'c1',
                           'type': 'AdaptiveScalarEncoder',
                           'w': 21},
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : VERBOSITY,
            'globalInhibition': 1,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TP and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.01,
        },
        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs.  Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable' : True,
        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },
        'clParams': {
            'regionName' : 'CLAClassifierRegion',
            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'clVerbosity' : VERBOSITY,
            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,
            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            # (Overwritten below when config['predictAheadTime'] is not None.)
            'steps': '1',
        },
        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# NOTE(review): `os` is used here but does not appear in this file's explicit
# imports; presumably one of the `import *` lines above provides it -- confirm,
# otherwise this raises NameError at module load.
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        'data.csv'))
# Experiment control section: data stream, iteration count, inference args and
# the metrics computed over the run.
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',
  # Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
  #
  # NOTE(review): config['firstRecord'] / config['lastRecord'] are read below
  # but are not defined in the base config dict in this file -- they must be
  # injected by the sub-experiment via updateConfigFromSubConfig(); confirm.
  'dataset' : {   'aggregation': config['aggregationInfo'],
    u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
    u'streams': [ {   u'columns': [u'c0', u'c1'],
                      u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                      u'source': 'file://{0!s}'.format((dataPath)),
                      u'first_record': config['firstRecord'],
                      u'last_record': config['lastRecord'],
                      u'types': [u'datetime', u'float']}],
    u'timeField': u'c0',
    u'version': 1},
  # Iteration count: maximum number of iterations.  Each iteration corresponds
  # to one record from the (possibly aggregated) dataset.  The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,
  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [1]},
  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
  ],
  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regex's correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| runt18/nupic | tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/base.py | Python | agpl-3.0 | 14,979 |
from .ar_model import AR
from .arima_model import ARMA, ARIMA
from . import vector_ar as var
from .arima_process import arma_generate_sample, ArmaProcess
from .vector_ar.var_model import VAR
from .vector_ar.svar_model import SVAR
from .vector_ar.dynamic import DynamicVAR
from .filters import api as filters
from . import tsatools
from .tsatools import (add_trend, detrend, lagmat, lagmat2ds, add_lag)
from . import interp
from . import stattools
from .stattools import *
from .base import datetools
from .seasonal import seasonal_decompose
from ..graphics import tsaplots as graphics
from .x13 import x13_arima_select_order
from .x13 import x13_arima_analysis
| detrout/debian-statsmodels | statsmodels/tsa/api.py | Python | bsd-3-clause | 661 |
import json
from flask.ext.api import status
import flask as fk
from api import app, check_access, upload_handler
from ddsmdb.common.models import ProjectModel
from ddsmdb.common.models import ContainerModel
from ddsmdb.common.models import UserModel
from ddsmdb.common.models import RecordModel
import mimetypes
import traceback
# from flask.ext.stormpath import user
API_VERSION = 1
API_URL = '/api/v{0}/private'.format(API_VERSION)
@app.route(API_URL + '/<api_token>/project/push/<project_name>', methods=['POST'])
def push_project(api_token, project_name):
    """Create a new project for the authenticated user.

    Responses: 201 created, 401 bad token or duplicate name, 405 bad method.

    Fix vs. original: the debug ``print api_token`` leaked the secret API
    token to stdout (and was Python-2-only syntax); it has been removed.
    """
    current_user = check_access(api_token)
    if current_user is None:
        return fk.make_response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
    if fk.request.method != 'POST':  # POST to create a new one only.
        return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
    project, created = ProjectModel.objects.get_or_create(name=project_name, owner=current_user)
    if not created:
        return fk.make_response('Push refused. Project name already used.', status.HTTP_401_UNAUTHORIZED)
    # Defaults; optionally overridden by the JSON request body.
    project.description = 'No description provided.'
    project.goals = 'No goals provided.'
    if fk.request.data:
        data = json.loads(fk.request.data)
        project.private = data.get('private', False)
        project.description = data.get('description', project.description)
        project.goals = data.get('goals', project.goals)
    project.save()
    return fk.make_response("Project created.", status.HTTP_201_CREATED)
@app.route(API_URL + '/<api_token>/project/sync/<project_name>', methods=['PUT', 'POST'])
def sync_project(api_token, project_name):
    """Synchronize a project.

    PUT  -- update the project's metadata from the JSON body.
    POST -- stage a new container from the uploaded 'data' (and optionally
            'image') multipart files and append it to the project history.

    Fixes vs. original:
      * ``str(traceback.print_exc())`` put the literal string "None" in the
        400 response body (print_exc returns None); use format_exc().
      * Python-2-only ``except Exception, e`` syntax replaced.
      * a PUT without a body fell through returning None (Flask 500); it now
        returns 204.
      * debug ``print`` statements routed through app.logger.debug.
    """
    current_user = check_access(api_token)
    if current_user is None:
        return fk.make_response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
    if fk.request.method == 'PUT':  # PUT to update an existing one only.
        project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
        if not fk.request.data:
            return fk.make_response('Nothing to synchronize.', status.HTTP_204_NO_CONTENT)
        data = json.loads(fk.request.data)
        project.name = data.get('name', project.name)
        project.private = data.get('private', project.private)
        project.description = data.get('description', project.description)
        project.goals = data.get('goals', project.goals)
        project.save()
        return fk.make_response('Project synchronized.', status.HTTP_201_CREATED)
    if fk.request.method == 'POST':
        project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
        container = ContainerModel()
        if not fk.request.files:
            return fk.make_response('No container image staged here.', status.HTTP_204_NO_CONTENT)
        app.logger.debug("Contains some request files...")
        try:
            data_obj = fk.request.files['data']
            data = json.loads(data_obj.read())
            # Empty/missing values fall back to the same defaults as before.
            container.system = data.pop('system', None) or 'unknown'
            app.logger.debug("Project Container System: %s", container.system)
            container.version = data.pop('version', None) or {}
            app.logger.debug("Project Version Control: %s", container.version)
            container.image = data.pop('image', None) or {'scope': 'unknown'}  # unknown, local, remote
            app.logger.debug("Project Image Scope: %s", container.image)
            container.save()
        except Exception:
            return fk.make_response(traceback.format_exc(), status.HTTP_400_BAD_REQUEST)
        if container.image['scope'] == 'local':
            # A locally-scoped image must be uploaded alongside the metadata.
            try:
                image_obj = fk.request.files['image']
                container.save()
                upload_handler(current_user, container, image_obj, container.system)
                project.history.append(str(container.id))
                project.save()
                app.logger.debug("%s", project.history[-1])
            except Exception:
                return fk.make_response(traceback.format_exc(), status.HTTP_400_BAD_REQUEST)
        return fk.make_response('Project is at the new staged container image.', status.HTTP_201_CREATED)
    return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# @app.route(API_URL + '/<api_token>/<user_id>/project/clone/<project_name>', methods=['GET'])
# def clone_project(api_token, user_id, project_name):
# current_user = check_access(api_token)
# if current_user is not None:
# if fk.request.method == 'GET':
# owner = UserModel.objects(id=user_id).first_or_404()
# project = ProjectModel.objects(name=project_name, owner=owner).first_or_404()
# if not project.private:
# clo_project = ProjectModel.objects(name=project_name, owner=current_user).first()
# if clo_project == None:
# clo_project = project.clone()
# clo_project.owner = current_user
# clo_project.status = {'origin':str(user_id)+":"+project_name+":"+str(record_id)}
# clo_project.save()
# else:
# return fk.Response("Project already exist in your workspace!", status.HTTP_201_CREATED)
# else:
# return fk.make_response('Access denied. Private project.', status.HTTP_401_UNAUTHORIZED)
# else:
# return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# else:
# return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
@app.route(API_URL + '/<api_token>/project/pull', methods=['GET'])
def pull_project_all(api_token):
    """Return JSON summaries of every project owned by the token's user."""
    current_user = check_access(api_token)
    if current_user is None:
        return fk.make_response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
    if fk.request.method != 'GET':
        return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
    summaries = [json.loads(project.summary_json())
                 for project in ProjectModel.objects(owner=current_user)]
    payload = json.dumps({'number': len(summaries), 'projects': summaries})
    return fk.Response(payload, mimetype='application/json')
@app.route(API_URL + '/<api_token>/project/pull/<project_name>', methods=['GET'])
def pull_project(api_token, project_name):
    """Return the activity view of one named project owned by the user."""
    current_user = check_access(api_token)
    if current_user is None:
        return fk.make_response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
    if fk.request.method != 'GET':
        return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
    if project_name is not None:
        # project_name is always bound by the route; the guard mirrors the
        # original control flow.
        project = ProjectModel.objects(owner=current_user, name=project_name).first_or_404()
        return fk.Response(project.activity_json(), mimetype='application/json')
@app.route(API_URL + '/<api_token>/project/remove/<project_name>', methods=['DELETE'])
def remove_project(api_token, project_name):
    """Delete one named project (or, defensively, all of the user's projects)."""
    current_user = check_access(api_token)
    if current_user is None:
        return fk.make_response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
    if fk.request.method != 'DELETE':
        return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
    if project_name is None:
        # Unreachable via this route (name is always bound) but kept from the
        # original: wipe every project of the user.
        for project in ProjectModel.objects(owner=current_user):
            project.delete()
        return fk.Response('All projects deleted', status.HTTP_200_OK)
    project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
    project.delete()
    return fk.Response('Project deleted', status.HTTP_200_OK)
@app.route(API_URL + '/<api_token>/project/dashboard', methods=['GET'])
def dashboard_project(api_token):
    """Summaries plus record activity for every project of the token's user.

    Fixes vs. original:
      * ``RecordModel.pbjects`` -> ``RecordModel.objects`` (AttributeError).
      * project summaries are parsed with ``json.loads`` before embedding,
        matching pull_project_all (the JSON string was being double-encoded).
      * record ids and timestamps are stringified so ``json.dumps`` does not
        fail on ObjectId/datetime values.
    """
    current_user = check_access(api_token)
    if current_user is None:
        return fk.make_response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
    if fk.request.method != 'GET':
        return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
    summaries = []
    for p in ProjectModel.objects(owner=current_user):
        records = RecordModel.objects(project=p)
        summaries.append({
            "project": json.loads(p.summary_json()),
            "activity": {
                "number": len(records),
                "records": [{"id": str(record.id), "created": str(record.created_at)}
                            for record in records],
            },
        })
    return fk.Response(json.dumps({'number': len(summaries), 'projects': summaries}),
                       mimetype='application/json')
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkManagementClientOperationsMixin(object):
    """Auto-generated (AutoRest) mixin for operations defined directly on the
    NetworkManagementClient. Do not hand-edit logic: per the file header,
    changes are lost when the client is regenerated."""

    def check_dns_name_availability(
        self,
        location,  # type: str
        domain_name_label,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DnsNameAvailabilityResult"
        """Checks whether a domain name in the cloudapp.azure.com zone is available for use.

        :param location: The location of the domain name.
        :type location: str
        :param domain_name_label: The domain name to be verified. It must conform to the following
         regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
        :type domain_name_label: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DnsNameAvailabilityResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.DnsNameAvailabilityResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DnsNameAvailabilityResult"]
        # Map auth/404/409 HTTP statuses to the corresponding azure-core errors.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"

        # Construct URL
        url = self.check_dns_name_availability.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['domainNameLabel'] = self._serialize.query("domain_name_label", domain_name_label, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DnsNameAvailabilityResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    check_dns_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_network_management_client_operations.py | Python | mit | 4,144 |
# Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2010, 2014 dequis
# Copyright (c) 2012 Randall Ma
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 horsik
# Copyright (c) 2013 Tao Sauvage
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import List # noqa: F401
from urllib import request
import json
import pathlib
import webbrowser
from libqtile import bar, layout, widget, qtile, hook
from libqtile.config import Click, Drag, Group, Key, Match, Screen, ScratchPad, DropDown
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
mod = "mod4"  # Super/Windows key used as the main modifier
terminal = guess_terminal()  # terminal emulator auto-detected by qtile
# Directory holding the user's personal helper scripts (e.g. lock.sh below).
personal_bin = str(pathlib.Path().home().joinpath(".bin"))
# Gruvbox "dark" color palette (hex RGB strings) used to theme bar and widgets.
gruvbox_dark = {
    "background": "#282828",
    "black": "#282828",
    "blue": "#458588",
    "brightBlack": "#928374",
    "brightBlue": "#83A598",
    "brightCyan": "#8EC07C",
    "brightGreen": "#B8BB26",
    "brightPurple": "#D3869B",
    "brightRed": "#FB4934",
    "brightWhite": "#EBDBB2",
    "brightYellow": "#FABD2F",
    "cursorColor": "#FFFFFF",
    "cyan": "#689D6A",
    "foreground": "#EBDBB2",
    "green": "#98971A",
    "purple": "#B16286",
    "red": "#CC241D",
    "selectionBackground": "#FFFFFF",
    "white": "#A89984",
    "yellow": "#D79921",
}
# Global key bindings. A list of available commands that can be bound to keys
# can be found at https://docs.qtile.org/en/latest/manual/config/lazy.html
keys = [
    # Switch between windows
    Key([mod], "h", lazy.layout.left(), desc="Move focus to left"),
    Key([mod], "l", lazy.layout.right(), desc="Move focus to right"),
    Key([mod], "j", lazy.layout.down(), desc="Move focus down"),
    Key([mod], "k", lazy.layout.up(), desc="Move focus up"),
    # NOTE(review): mod+space is bound a second time further down for
    # next_keyboard; one of the two bindings shadows the other — confirm
    # which is intended.
    Key([mod], "space", lazy.layout.next(), desc="Move window focus to other window"),
    # Move windows between left/right columns or move up/down in current stack.
    # Moving out of range in Columns layout will create new column.
    Key(
        [mod, "shift"], "h", lazy.layout.shuffle_left(), desc="Move window to the left"
    ),
    Key(
        [mod, "shift"],
        "l",
        lazy.layout.shuffle_right(),
        desc="Move window to the right",
    ),
    Key([mod, "shift"], "j", lazy.layout.shuffle_down(), desc="Move window down"),
    Key([mod, "shift"], "k", lazy.layout.shuffle_up(), desc="Move window up"),
    # Grow windows. If current window is on the edge of screen and direction
    # will be to screen edge - window would shrink.
    Key([mod, "control"], "h", lazy.layout.grow_left(), desc="Grow window to the left"),
    Key(
        [mod, "control"], "l", lazy.layout.grow_right(), desc="Grow window to the right"
    ),
    Key([mod, "control"], "j", lazy.layout.grow_down(), desc="Grow window down"),
    Key([mod, "control"], "k", lazy.layout.grow_up(), desc="Grow window up"),
    Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"),
    # Toggle between split and unsplit sides of stack.
    # Split = all windows displayed
    # Unsplit = 1 window displayed, like Max layout, but still with
    # multiple stack panes
    Key(
        [mod, "shift"],
        "Return",
        lazy.layout.toggle_split(),
        desc="Toggle between split and unsplit sides of stack",
    ),
    Key([mod], "Return", lazy.spawn(terminal), desc="Launch terminal"),
    # Toggle between different layouts as defined below
    Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
    Key([mod], "w", lazy.window.kill(), desc="Kill focused window"),
    Key([mod, "control"], "r", lazy.reload_config(), desc="Reload the config"),
    Key(
        [mod, "shift", "control"],
        "q",
        lazy.shutdown(),
        desc="Shutdown Qtile",
    ),
    Key(
        [mod, "control"],
        "x",
        lazy.spawn(f"sh {personal_bin}/lock.sh"),
        desc="Lock screen",
    ),
    # Plain string: no interpolation needed (was a placeholder-less f-string).
    Key([mod], "d", lazy.spawn("rofi -show run"), desc="Spawn rofi launcher"),
    Key(
        [mod],
        "F10",
        lazy.spawn("/usr/bin/xfce4-screenshooter"),
        desc="Run XFCE4 screenshooter tool",
    ),
    Key([mod], "f", lazy.window.toggle_fullscreen(), desc="Toggle full screen"),
    Key(
        [mod],
        "space",
        lazy.widget["keyboardlayout"].next_keyboard(),
        desc="Next keyboard layout",
    ),
    Key(
        [mod],
        "p",
        # Plain string: no interpolation needed (was a placeholder-less f-string).
        lazy.spawn("rofi -modi 'clipboard:greenclip print' -show"),
        desc="Clipboard history",
    ),
]
# Workspaces "1"-"9" and "0". Each one pins a starting layout and may spawn a
# default application and/or capture matching windows on that group.
groups = [
    Group(
        "1",
        layout="max",
        spawn=("google-chrome-stable",),
        matches=[Match(wm_class=["google-chrome"])],
    ),
    Group(
        "2",
        layout="monadtall",
        spawn=(f"{terminal}",),
        matches=[Match(wm_class=[f"{terminal}"])],
    ),
    Group("3", layout="monadtall", spawn=("emacs",)),
    Group("4", layout="monadtall"),
    Group("5", layout="monadtall"),
    Group("6", layout="monadtall"),
    Group("7", layout="monadtall"),
    Group("8", layout="monadtall"),
    Group("9", layout="monadtall", spawn=("ferdi",)),
    Group(
        "0",
        layout="monadtall",
        spawn=("spotify --force-device-scale-factor=2.0",),
        matches=[Match(wm_class=["spotify"])],
    ),
]
# Per-group bindings: mod+<name> focuses the group, and mod+shift+<name>
# sends the focused window there and follows it.
for grp in groups:
    focus_key = Key(
        [mod],
        grp.name,
        lazy.group[grp.name].toscreen(),
        desc="Switch to group {}".format(grp.name),
    )
    move_key = Key(
        [mod, "shift"],
        grp.name,
        lazy.window.togroup(grp.name, switch_group=True),
        desc="Switch to & move focused window to group {}".format(grp.name),
    )
    keys.extend((focus_key, move_key))
# Dropdown scratchpads: floating windows summoned over the current group and
# hidden again when they lose focus. (name, command, height) per dropdown.
_dropdown_specs = (
    ("terminal", f"{terminal}", 0.8),
    ("emacs", "emacs", 0.9),
)
scratchpads = [
    ScratchPad(
        "scratchpad",
        dropdowns=tuple(
            DropDown(
                dd_name,
                dd_cmd,
                opacity=0.98,
                on_focus_lost_hide=True,
                height=dd_height,
            )
            for dd_name, dd_cmd, dd_height in _dropdown_specs
        ),
    )
]
groups += scratchpads
keys += [
    Key([mod], "minus", lazy.group["scratchpad"].dropdown_toggle("terminal")),
    Key([mod], "equal", lazy.group["scratchpad"].dropdown_toggle("emacs")),
]
# Gap and border settings shared by every layout.
margin = 8
border_width = 0
_layout_defaults = dict(border_width=border_width, margin=margin)
layouts = [
    layout.MonadTall(**_layout_defaults),
    layout.MonadWide(**_layout_defaults),
    layout.Columns(border_focus_stack=["#d75f5f", "#8f3d3d"], **_layout_defaults),
    layout.Max(**_layout_defaults),
    layout.Bsp(**_layout_defaults),
    layout.Stack(num_stacks=2, **_layout_defaults),
    layout.Matrix(**_layout_defaults),
    layout.RatioTile(**_layout_defaults),
    layout.Tile(**_layout_defaults),
    layout.TreeTab(**_layout_defaults),
    layout.VerticalTile(**_layout_defaults),
    layout.Zoomy(**_layout_defaults),
]
# Defaults inherited by all widgets / extensions unless overridden per widget.
widget_defaults = dict(
    font="Hack",
    fontsize=30,
    padding=3,
)
extension_defaults = widget_defaults.copy()
# One screen with a top status bar. Widgets are icon+text pairs separated by
# Sep/Spacer widgets; several of them open a related web page on click.
screens = [
    Screen(
        top=bar.Bar(
            [
                widget.CurrentLayoutIcon(),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Spacer(length=10),
                widget.GroupBox(
                    borderwidth=1,
                    disable_drag=True,
                    font="Hack",
                    highlight_method="text",
                    active=gruvbox_dark["foreground"],
                    this_current_screen_border=gruvbox_dark["yellow"],
                ),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Spacer(length=bar.STRETCH),
                widget.Sep(),
                widget.Spacer(length=20),
                widget.ThermalSensor(fmt=" {}", font="Hack Nerd Font"),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Volume(fmt=" 墳 {}", font="Hack Nerd Font"),
                widget.Spacer(
                    length=10,
                ),
                widget.Sep(),
                widget.Spacer(length=10),
                widget.OpenWeather(
                    fmt=" {} ",
                    font="Hack Nerd Font",
                    location="Florianopolis",
                    format="fln: {main_temp} °{units_temperature}",
                    mouse_callbacks={
                        "Button1": lambda: webbrowser.open_new_tab(
                            "https://wttr.in/florianopolis"
                        )
                    },
                ),
                widget.OpenWeather(
                    font="Hack Nerd Font",
                    location="Amsterdam",
                    format="ams: {main_temp} °{units_temperature}",
                    mouse_callbacks={
                        "Button1": lambda: webbrowser.open_new_tab(
                            "https://wttr.in/amsterdam"
                        )
                    },
                ),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Spacer(length=10),
                widget.Maildir(
                    fmt=" {}",
                    font="Hack Nerd Font",
                    maildir_path="~/mail/personal",
                    sub_folders=(
                        {"label": "i", "path": "inbox"},
                        {"label": "a", "path": "archives"},
                    ),
                    mouse_callbacks={
                        "Button1": lambda: webbrowser.open_new_tab("https://gmail.com")
                    },
                ),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Spacer(length=10),
                widget.KeyboardLayout(
                    configured_keyboards=("us", "br"), fmt=" {}", font="Hack Nerd Font"
                ),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Battery(
                    format=" {char} {percent:2.0%} {hour:d}:{min:02d}/{watt:.2f}W",
                    charge_char="",
                    discharge_char="",
                    font="Hack Nerd Font",
                    empty_char="",
                    full_char="",
                    # Fixed typo: the Battery widget parameter is
                    # "notify_below"; the previous "notify_bellow" was an
                    # unknown keyword and the low-battery warning never fired.
                    notify_below=20,
                    show_short_text=False,
                ),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Spacer(length=10),
                widget.Clock(
                    format="%H:%M:%S",
                    fmt=" {} ",
                    font="Hack Nerd Font",
                    mouse_callbacks={
                        "Button1": lambda: webbrowser.open_new_tab(
                            "https://calendar.google.com/calendar/u/0/r"
                        )
                    },
                ),
                widget.Clock(
                    format="%h %d %Y",
                    fmt=" {}",
                    font="Hack Nerd Font",
                    mouse_callbacks={
                        "Button1": lambda: webbrowser.open_new_tab(
                            "https://calendar.google.com/calendar/u/0/r"
                        )
                    },
                ),
                widget.Spacer(length=10),
                widget.Sep(),
                widget.Spacer(length=10),
                widget.Systray(icon_size=40),
                widget.Sep(),
            ],
            size=60,
            margin=8,
            background=gruvbox_dark["background"],
            border_width=[0, 0, 0, 0],  # no bar borders (top/right/bottom/left)
        ),
    ),
]
# Mouse bindings: mod+drag moves (Button1) or resizes (Button3) floating
# windows; mod+middle-click raises the window.
mouse = [
    Drag(
        [mod],
        "Button1",
        lazy.window.set_position_floating(),
        start=lazy.window.get_position(),
    ),
    Drag(
        [mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()
    ),
    Click([mod], "Button2", lazy.window.bring_to_front()),
]
dgroups_key_binder = None
dgroups_app_rules = []  # type: List
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
# Windows matching these rules always float (dialogs, password prompts, ...).
# Run `xprop` to see the wm class and name of an X client.
floating_layout = layout.Floating(
    border_width=border_width,
    float_rules=[
        *layout.Floating.default_float_rules,
        Match(wm_class="confirmreset"),  # gitk
        Match(wm_class="makebranch"),  # gitk
        Match(wm_class="maketag"),  # gitk
        Match(wm_class="ssh-askpass"),  # ssh-askpass
        Match(title="branchdialog"),  # gitk
        Match(title="pinentry"),  # GPG key password entry
        Match(title="qalculate-gtk"),  # qalculate-gtk
    ],
)
auto_fullscreen = True  # honor clients' own fullscreen requests
focus_on_window_activation = "smart"  # presumably avoids cross-group focus stealing -- see qtile docs
reconfigure_screens = True  # presumably re-applies screen config on display changes -- TODO confirm
# If things like steam games want to auto-minimize themselves when losing
# focus, should we respect this or not?
auto_minimize = True
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
| benmezger/dotfiles | dot_config/qtile/config.py | Python | mit | 15,469 |
# Author: Pascal Bleser <pascal.bleser@opensuse.org>
# This file is licensed under the GNU General Public License version 2
# A copy of the license is available here: http://www.gnu.org/licenses/gpl-2.0.txt
from webpin.util import *
from webpin.const import *
class PackageManager:
    """Base class for package manager backends.

    Subclasses override the underscore hooks below; ``hasChannel`` adds URL
    normalization plus a few repository-specific retry heuristics on top of
    them.
    """

    def __init__(self, name):
        self.name = name

    def _buildCacheHook(self):
        """Hook invoked before each lookup; subclasses may refresh caches."""
        pass

    def _hasBuildServiceChannel(self, name, buildServiceName, url):
        """Hook: look up an openSUSE Build Service repository."""
        pass

    def _hasChannel(self, name, url):
        """Hook: look up a plain repository by name and URL."""
        pass

    def _fallbackChannelStrategy(self, name, url):
        """Hook: last-resort fuzzy lookup; the default finds nothing."""
        return None

    def hasChannel(self, name, url):
        """Return the lookup result for repository *name* at *url*, or a
        falsy value when this package manager does not know it."""
        url = normalizeURL(url)
        self._buildCacheHook()

        found = None
        #
        # Special handling for some repositories here...
        # (ugly, will find a more pluggable/clean way later)
        #
        bs_match = buildServiceRegex.match(url)
        if bs_match:
            found = self._hasBuildServiceChannel(name, bs_match.group(1), url)
        elif name == "guru":
            # "guru" may be registered either as an RPM-MD repo (.../RPMS)
            # or as a YaST2 repo (without the suffix): try both forms.
            # NOTE(review): these retries return immediately and therefore
            # skip the fallback strategy below -- presumably intentional.
            found = self._hasChannel(name, url)
            if not found:
                if url.endswith('/RPMS'):
                    # tried the RPM-MD URL, now try the YaST2 repo URL:
                    return self._hasChannel(name, url[:-5])
                # tried the YaST2 repo URL, now try the RPM-MD one:
                return self._hasChannel(name, url + '/RPMS')
        elif url.endswith('/suse'):
            found = self._hasChannel(name, url) or self._hasChannel(name, url[:-5])
        elif url.endswith('/inst-source'):
            found = self._hasChannel(name, url) or self._hasChannel(name, url + '/suse')
        else:
            found = self._hasChannel(name, url)

        # fallback strategies, let's try some fuzzy logic (sort of):
        if not found:
            found = self._fallbackChannelStrategy(name, url)
        return found
| openSUSE/webpin-cli | webpin/PackageManager.py | Python | gpl-2.0 | 1,808 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# #
# This file is part of 3d Brain Atlas Reconstructor #
# #
# Copyright (C) 2010-2012 Piotr Majka, Jakub M. Kowalski #
# #
# 3d Brain Atlas Reconstructor is free software: you can redistribute #
# it and/or modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# #
# 3d Brain Atlas Reconstructor is distributed in the hope that it #
# will be useful, but WITHOUT ANY WARRANTY; without even the implied #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with 3d Brain Atlas Reconstructor. If not, see #
# http://www.gnu.org/licenses/. #
# #
###############################################################################
"""
The module provides classes necessary to handle basic CAF dataset index
manipulations.
G{importgraph}
@var CONF_HIERARCHY_ROOT_NAME: Default value of C{L{barIndexer.__init__}()}
argument.
"""
# Default name of the hierarchy root element (default value of a
# barIndexer.__init__() argument -- see the module docstring above).
CONF_HIERARCHY_ROOT_NAME = "Brain"
# Reference coordinates used by the aligner; presumably an
# (x-offset, y-offset, x-scale, y-scale) 4-tuple -- TODO confirm.
CONF_ALIGNER_REFERENCE_COORDS = ( 1.0, -6.0, 0.01, -0.01)
import os
import sys
import unicodedata
import xml.dom.minidom as dom
import base
from string import *
def remove_accents(input_str):
    """Return *input_str* as unicode with all combining diacritical marks
    stripped (accented letters collapse to their base characters)."""
    # NFKD decomposition splits accented characters into base + combining
    # marks; dropping the marks leaves the plain base characters.
    decomposed = unicodedata.normalize('NFKD', unicode(input_str))
    return u"".join(ch for ch in decomposed if not unicodedata.combining(ch))
class barIndexerObject(base.barObject):
    """
    Abstract ancestor of every class defined in this module.
    """
    def __init__(self):
        pass

    @classmethod
    def fromXML(cls, sourceXMLElement):
        """
        Build an object from its (valid) XML representation. The XML
        representation may be a path to an XML document, an xml.dom.Node
        object, an XML string, or a file-like object whose read() method
        returns an XML string.

        Subclasses must override this method; the base implementation
        always raises.

        @type sourceXMLElement: xml.dom.Node or str or file
        @param sourceXMLElement: XML representation the object is based on
        @return: created object
        @rtype: cls
        """
        raise NotImplementedError("Virtual method executed.")
class barIndexerElement(barIndexerObject):
    """
    Virtual class parental to classes representing CAF dataset index file
    elements.
    @cvar _elementName: name of the XML element represented by class instances
    @type _elementName: str
    @cvar _elementAttributes: names of attributes of the XML element represented by class
                              instances
    @type _elementAttributes: [str, ...]
    """
    _elementAttributes = []
    _elementName = None
    def getXMLelement(self):
        """
        Serialize the object into a DOM element. Only attributes listed in
        C{_elementAttributes} that are both set and not None are emitted.
        @return: dom XML object describing the object
        """
        retDocument = dom.Document()
        # Fixed typo: the attribute is 'encoding', not 'endocing'.
        # NOTE(review): minidom does not consult this attribute when
        # serializing without an explicit encoding argument -- confirm
        # whether anything downstream reads it.
        retDocument.encoding = base.BAR_XML_ENCODING
        retElement = retDocument.createElement(self._elementName)
        # Put all attributes into the element
        for attribName in self._elementAttributes:
            if hasattr(self, attribName):
                attribValue = getattr(self, attribName)
                if attribValue != None:
                    # Sun Dec 25 12:11:33 CET 2011
                    # TODO: If given argument is string, try to properly encode
                    # it into unicode (still requires a lot of testing)
                    # If type is other than string, just convert if to string
                    # (regular string, not unicode)
                    if isinstance(attribValue, basestring):
                        try:
                            retElement.setAttribute(attribName, attribValue.decode("utf-8" ).encode("utf-8"))
                        except UnicodeError:
                            # Narrowed from a bare 'except:': only encoding
                            # problems should trigger the ASCII fallback.
                            print >>sys.stderr, "An serialization error has been encountered. Replacing utf8 data with ascii characters."
                            retElement.setAttribute(attribName, remove_accents(attribValue))
                    else:
                        retElement.setAttribute(attribName, str(attribValue))
        return retElement
    @classmethod
    def fromXML(cls, sourceXMLElement, retClassName = None, indexer=None):
        """
        Creates object from its (valid) xml representation.
        @note: method designed to be utilized only by methods overriding it in
               derived classes
        @type sourceXMLElement: xml.dom.Node
        @param sourceXMLElement: XML description of the C{barIndexer} object
                                 to be created
        @type retClassName: class
        @param retClassName: class of the object to be created
        @type indexer: L{barIndexer}
        @param indexer: the object encapsulating returned class instance
        @return: object based on sourceXMLElement description
                 (initiation of the object might be unfinished)
        @rtype: L{retClassName}
        """
        assert retClassName, "No return class given"
        attrs = {}
        for attribute in retClassName._elementAttributes:
            if sourceXMLElement.hasAttribute(attribute):
                attrs[attribute] = sourceXMLElement.getAttribute(attribute)
        return retClassName(**attrs)
class barIndexerPropertyElement(barIndexerElement):
    """
    Representation of a 'property' element of the CAF dataset index file.
    @ivar _type: the property name ('type' attribute of the element)
    @type _type: str
    @ivar _value: the property value ('value' attribute of the element)
    @type _value: str
    """
    _elementName = 'property'
    _elementAttributes = ['type', 'value']

    def __init__(self, type, value):
        """
        @param type: value of the 'type' attribute of the represented element
        @type type: str
        @param value: value of the 'value' attribute of the represented element
        @type value: str
        """
        self._type = type
        self._value = value

    def __readType(self):
        return self._type

    def __writeType(self, newType):
        self._type = newType

    def __readValue(self):
        return self._value

    def __writeValue(self, newValue):
        self._value = newValue

    @classmethod
    def fromXML(cls, sourceXMLElement, indexer=None):
        """
        Create an instance from its (valid) XML representation.
        @type sourceXMLElement: xml.dom.Node
        @param sourceXMLElement: XML description of the object to be created
        @type indexer: L{barIndexer}
        @param indexer: the object encapsulating the returned class instance
        @return: object based on L{sourceXMLElement} description
        @rtype: cls
        """
        return barIndexerElement.fromXML(sourceXMLElement, cls, indexer=indexer)

    type = property(__readType, __writeType)
    """
    The 'type' attribute of the represented 'property' element.
    @type: str
    """

    # Backward-compatible alias for the 'type' property.
    name = type

    value = property(__readValue, __writeValue)
    """
    The 'value' attribute of the represented 'property' element.
    @type: str
    """
class barIndexerGroupElement(barIndexerElement):
    """
    Class of objects representing 'group' elements of CAF dataset index file.
    @ivar _name: value of the 'name' attribute of represented XML element
    @type _name: str
    @ivar _id: value of the 'id' attribute of represented XML element
    @type _id: int
    @ivar _fill: value of the 'fill' attribute of represented XML element
    @type _fill: str
    @ivar _fullname: value of the 'fullname' attribute of represented XML element
    @type _fullname: str
    @ivar _uid: value of the 'uid' attribute of represented XML element
    @type _uid: int
    @ivar _structure: object handling manipulation of the 'structure' element related
                      to the represented element; its L{uid<barIndexerStructureElement.uid>}
                      value overrides the L{_uid} value if given
    @type _structure: L{barIndexerStructureElement}
    @ivar parent: object handling manipulations of the 'group' element parental to the
                  represented element
    @type parent: L{barIndexerGroupElement}
    @ivar children: objects handling manipulations of the 'group' elements child to the
                    represented element
    @type children: [L{barIndexerGroupElement}, ...]
    """
    _elementName = 'group'
    _elementAttributes = ['name', 'id', 'fill', 'fullname', 'uid', 'ontologyid']
    def __init__(self, **kwargs):
        """
        Accepted keyword arguments:
          - 'name' (str) - value of the 'name' attribute of represented XML
            element,
          - 'id' (int) - value of the 'id' attribute of represented XML
            element,
          - 'fill' (str) - value of the 'fill' attribute of represented XML
            element,
          - 'fullname' (str) - value of the 'fullname' attribute of represented
            XML element,
          - 'uid' (int) - value of the 'uid' attribute of represented XML
            element,
          - 'structure' (L{barIndexerStructureElement}) - object handling
            manipulation of the structure related to the group;
            its L{uid<barIndexerStructureElement.uid>} overrides the L{uid}
            argument if given,
          - 'ontologyid' - value of the 'ontologyid' of represented XML.
        """
        # Must be set before the attribute loop: the 'uid' setter checks it.
        self.structure = None
        # Set initial settings
        for attribute in self._elementAttributes:
            setattr(self, attribute, kwargs.get(attribute))
        if 'structure' in kwargs:
            self.structure = kwargs['structure']
        # By default parents and children list is empty
        self.parent = None
        self.children = []
    def __getName(self):
        """
        @return: value of the 'name' attribute of represented XML element
        @rtype: str
        """
        return self._name
    def __setName(self, newName):
        """
        Set the value of the 'name' attribute of represented XML element.
        @param newName: a new value of the 'name' attribute of represented XML element
        @type newName: str
        """
        self._name = newName
    def __getGID(self):
        """
        @return: value of the 'id' attribute of represented XML element
        @rtype: int
        """
        return self._id
    def __setGID(self, newID):
        """
        Set value of the 'id' attribute of represented XML element.
        @param newID: value of the 'id' attribute of represented XML element
        @type newID: int
        """
        self._id = int(newID)
    def __getFill(self):
        """
        @return: value of the 'fill' attribute of represented XML element
        @rtype: str
        """
        return self._fill
    def __setFill(self, newFill):
        """
        Set value of the 'fill' attribute of represented XML element.
        @param newFill: value of the 'fill' attribute of represented XML element
        @type newFill: str
        """
        self._fill = newFill
    def __getFullname(self):
        """
        @return: value of the 'fullname' attribute of represented XML element
        @rtype: str
        """
        return self._fullname
    def __setFullname(self, newFullName):
        """
        Set value of the 'fullname' attribute of represented XML element.
        @param newFullName: value of the 'fullname' attribute of represented XML element
        @type newFullName: str
        """
        # NOTE(review): falsy values are silently ignored, so the attribute
        # may remain unset -- getXMLelement() guards reads with hasattr().
        if newFullName:
            self._fullname = newFullName
    def __getUID(self):
        """
        @return: value of the 'uid' attribute of represented XML element
        @rtype: int
        """
        # The uid of the bound structure (if any) takes precedence over the
        # locally stored value.
        if self._structure != None:
            return self._structure.uid
        return self._uid
    def __setUID(self, newUID):
        """
        Set value of the 'uid' attribute of represented XML element.
        @param newUID: value of the 'uid' attribute of represented XML element
        @type newUID: int
        @note: If L{_structure} is assigned the ValueError is raisen.
        """
        # uid is read only when structure is assigned
        if self._structure != None:
            raise ValueError, "uid is read only property when corresponding structure is assigned."
        # uid can be none if the group is not among paths
        # error if None given to int()
        if newUID == None:
            nuid = newUID
        else:
            nuid = int(newUID)
        self._uid = nuid
    def __getStructure(self):
        """
        @return: object representing the 'structure' element related to represented element
        @rtype: L{barIndexerStructureElement}
        """
        return self._structure
    def __setStructure(self, newStructure):
        """
        Assign L{_structure} an object representing the 'structure' element related to represented element.
        @param newStructure: an object representing the 'structure' element related
                             to represented element
        @type newStructure: L{barIndexerStructureElement}
        """
        # Clearing the structure also clears the cached uid so that the
        # property falls back to the locally stored value.
        if newStructure == None:
            self._uid = None
        self._structure = newStructure
    def __getUidList(self):
        """
        Return list of 'uid' attributes of 'structure' elements assigned to the represented element.
        'structure' elements are gathered from all children as well as from the root element.
        @rtype: [int, ...]
        @return: identifiers of 'structure' elements related to the represented
                 hierarchy group.
        """
        # Flatten the mapped uid tree, then drop non-int entries (None for
        # groups that have no structure assigned).
        allUids = base.flatten(\
                self.__getMappedChildList(depth=999, properties=('uid',)))
        return filter(lambda x: isinstance(x, int), allUids)
    def __setUidList(self, newValues):
        """
        Raise ValueError.
        """
        raise ValueError, "uidList is read only property."
    def getXMLelement(self):
        """
        Serialize the group and, recursively, its children (sorted by name)
        into a DOM element.
        """
        groupElement = barIndexerElement.getXMLelement(self)
        orderedChildren = sorted(self.children, key = lambda x: x.name)
        for childElement in orderedChildren:
            groupElement.appendChild(childElement.getXMLelement())
        return groupElement
    @classmethod
    def fromXML(cls, sourceXMLElement, indexer=None):
        """
        Creates object from its (valid) xml representation.
        @type sourceXMLElement: xml.dom.Node
        @param sourceXMLElement: XML description of the C{barIndexerGroupElement} object
                                 to be created.
        @type indexer: L{barIndexer}
        @param indexer: the object encapsulating returned class instance
        @return: Object based on sourceXMLElement description.
        @rtype: cls
        """
        result = barIndexerElement.fromXML(sourceXMLElement, cls, indexer=indexer)
        # Recursively rebuild the child subtree from nested 'group' elements.
        for child in sourceXMLElement.childNodes:
            if child.nodeType == dom.Node.ELEMENT_NODE:
                result.children.append(cls.fromXML(child))
        return result
    def getStructureNameIterator(self, depth=999):
        """
        @param depth: hierarchy tree iteration depth limit
        @type depth: int
        @return: iterator over names of structures composing the group
                 in the hierarchy tree deep up to the given L{depth}
        @rtype: generator
        """
        if self._structure != None:
            yield self._structure.name
        elif self._uid != None:
            yield self.name
        if depth > 0:
            for child in self.children:
                for name in child.getStructureNameIterator(depth - 1):
                    yield name
    def getVisibleGroupIterator(self, depth=999, leavesOnly=False):
        """
        @param depth: hierarchy tree iteration depth limit
        @type depth: int
        @param leavesOnly: True if requested to iterate over names of
                           the leaves of the iteration tree only, False
                           otherwise
        @type leavesOnly: bool
        @return: iterator over groups ascendant to any structure included
                 in CAF slides in the hierarchy tree deep up to the given
                 L{depth}
        @rtype: generator
        """
        isVisible = self.uid != None
        isNotLeaf = False
        for group in self.children:
            if depth == 0:
                if isVisible:
                    break
                # Depth exhausted: only probe whether any descendant is
                # visible (first hit is enough) without yielding it.
                for subgroup in group.getVisibleGroupIterator(0, leavesOnly):
                    isVisible = True
                    break
            else:
                for subgroup in group.getVisibleGroupIterator(depth - 1, leavesOnly):
                    isVisible = True
                    isNotLeaf = True
                    yield subgroup
        if isVisible and not (leavesOnly and isNotLeaf):
            yield self
    def getChildList(self, depth=1):
        """
        An alias for C{self.L{__getMappedChildList<barIndexerGroupElement.__getMappedChildList>}(depth, ('name',))}
        """
        return self.__getMappedChildList(depth, ('name',))
    def getNameUidChildList(self, depth=1):
        """
        An alias for C{self.L{__getMappedChildList<barIndexerGroupElement.__getMappedChildList>}(depth, ('name', 'id'))}
        """
        return self.__getMappedChildList(depth, ('name', 'id'))
    def getNameFullNameUid(self, depth=1):
        """
        An alias for C{self.L{__getMappedChildList<barIndexerGroupElement.__getMappedChildList>}(depth, ('name', 'fullname', 'id'))}
        """
        return self.__getMappedChildList(depth, ('name', 'fullname', 'id'))
    def printHierarchyTree(self, depth = 0):
        """
        Print hierarchy tree of 'group' elements rooted in the represented element.
        @type depth: int
        @param depth: level of nesting - margin for printing
        @return: tree representation of hierarchy
        @rtype: str
        """
        tree = self.children
        # NOTE(review): the root name goes to stdout while descendants go to
        # stderr -- looks unintended; confirm before relying on the output.
        if depth == 0: print self.name
        if tree == None or len(tree) == 0:
            pass
        else:
            for val in tree:
                print >>sys.stderr, "|"+"  | " * depth, val.name
                val.printHierarchyTree(depth+1)
    def nextSibling(self):
        """
        An alias for C{self.L{_getSibling<barIndexerGroupElement._getSibling>}(+1)}.
        """
        return self._getSibling(+1)
    def prevSibling(self):
        """
        An alias for C{self.L{_getSibling<barIndexerGroupElement._getSibling>}(-1)}.
        """
        return self._getSibling(-1)
    def _getSibling(self, direction = -1):
        """
        @type direction: int
        @param direction: determines, if next or previous sibling is returned
        @rtype: L{barIndexerGroupElement}
        @return: next (if C{L{direction} = +1}) or previous (if C{L{direction} = -1})
                 sibling of given group element. If given group element has no parent,
                 the element itself is returned.
        """
        if not self.parent:
            return self
        spc = self.parent.children
        # Clamp to the first/last sibling instead of wrapping around.
        index = spc.index(self) + direction
        if index < 0:
            index = 0
        if index > len(spc)-1:
            index = len(spc)-1
        return spc[index]
    def __getMappedChildList(self, depth = 1, properties = ('name','id')):
        """
        @param depth: height of the returned hierarchy tree
        @type depth: int
        @param properties: requested attribute names
        @type properties: (str, ...)
        @return: tuple-based hierarchy tree containing tuples with requested attributes
                 of 'group' elements rooted in the represented element
        @rtype: tuple
        """
        # Implicitly returns None once depth drops below 0; the recursive
        # calls rely on that falsy result (via all()) to prune the subtree.
        if depth >= 0:
            retList = map(lambda x:\
                    x.__getMappedChildList(depth - 1, properties), self.children)
            mappedInfo = tuple(map(lambda x: getattr(self,x), properties))
            if retList == []:
                return (mappedInfo,)
            else:
                if all(retList):
                    return (mappedInfo, tuple(retList))
                else:
                    return (mappedInfo,)
    name = property(__getName, __setName)
    """
    The 'name' attribute of represented XML element.
    @type: str
    """
    id = property(__getGID, __setGID)
    """
    The 'id' attribute of represented XML element.
    @type: int
    """
    fill = property(__getFill, __setFill)
    """
    The 'fill' attribute of represented XML element.
    @type: str
    """
    fullname = property(__getFullname, __setFullname)
    """
    The 'fullname' attribute of represented XML element.
    @type: str
    """
    uid = property(__getUID, __setUID)
    """
    The 'uid' attribute of represented XML element.
    Read-only property if a L{barIndexerStructureElement} object is assigned
    to the object.
    @type: int
    """
    uidList = property(__getUidList, __setUidList)
    """
    List of 'uid' attributes of 'structure' elements assigned to the represented element.
    Read-only property.
    @type: [int, ...]
    """
    structure = property(__getStructure, __setStructure)
    """
    Object representing 'structure' element related to the represented element.
    @type: L{barIndexerStructureElement}
    """
class barIndexerSlideElement(barIndexerElement):
    """
    Representation of a 'slide' element of the CAF dataset index file.
    @ivar _coronalcoord: the element's 'coronalcoord' attribute
    @type _coronalcoord: str
    @ivar _slidenumber: the element's 'slidenumber' attribute
    @type _slidenumber: int
    @ivar _transformationmatrix: the element's 'transformationmatrix' attribute
    @type _transformationmatrix: (float, float, float, float)
    """
    _elementName = 'slide'
    _elementAttributes = ['coronalcoord',
                          'slidenumber',
                          'transformationmatrix']

    def __init__(self, coronalcoord, slidenumber, transformationmatrix):
        """
        @param coronalcoord: the 'coronalcoord' attribute of the element
        @type coronalcoord: str
        @param slidenumber: the 'slidenumber' attribute of the element
        @type slidenumber: convertable to int
        @param transformationmatrix: the 'transformationmatrix' attribute
        @type transformationmatrix: (float, float, float, float)
        """
        # Assignments go through the properties below, so slidenumber is
        # converted to int here.
        self.coronalcoord = coronalcoord
        self.slideNumber = slidenumber
        self.transformationmatrix = transformationmatrix

    def __readCoronalCoord(self):
        return self._coronalcoord

    def __writeCoronalCoord(self, newValue):
        self._coronalcoord = newValue

    def __readSlideNumber(self):
        return self._slidenumber

    def __writeSlideNumber(self, newValue):
        # Stored as int regardless of the input representation.
        self._slidenumber = int(newValue)

    def __readMatrix(self):
        return self._transformationmatrix

    def __writeMatrix(self, newValue):
        self._transformationmatrix = newValue

    def getXMLelement(self):
        """
        Serialize the slide into a DOM element, re-encoding the
        transformation matrix tuple as a comma-separated string.
        """
        slideElement = barIndexerElement.getXMLelement(self)
        matrixAsText = ','.join(map(str, self.transformationmatrix))
        slideElement.setAttribute('transformationmatrix', matrixAsText)
        return slideElement

    @classmethod
    def fromXML(cls, domXMLElement, indexer=None):
        """
        Create an instance from its (valid) XML representation.
        @type domXMLElement: xml.dom.Node
        @param domXMLElement: XML description of the object to be created
        @type indexer: L{barIndexer}
        @param indexer: the object encapsulating the returned class instance
        @return: object based on L{domXMLElement} description
        @rtype: cls
        """
        rawMatrix = domXMLElement.getAttribute('transformationmatrix')
        matrix = tuple(float(item) for item in rawMatrix.split(','))
        return cls(coronalcoord=domXMLElement.getAttribute('coronalcoord'),
                   slidenumber=domXMLElement.getAttribute('slidenumber'),
                   transformationmatrix=matrix)

    coronalcoord = property(__readCoronalCoord, __writeCoronalCoord)
    """
    The 'coronalcoord' attribute of the represented XML element.
    @type: str
    """

    slideNumber = property(__readSlideNumber, __writeSlideNumber)
    """
    The 'slidenumber' attribute of the represented XML element.
    @type: int
    """

    # Backward-compatible aliases for slideNumber.
    slidenumber = slideNumber
    name = slideNumber

    transformationmatrix = property(__readMatrix, __writeMatrix)
    """
    The 'transformationmatrix' attribute of the represented XML element.
    @type: (float, float, float, float)
    """
class barIndexerStructureElement(barIndexerElement):
    """
    Class of objects representing 'structure' elements of CAF dataset index file.
    @ivar _name: value of the 'name' attribute of the represented element
    @type _name: str
    @ivar _bbx: value of the 'bbx' attribute of the represented element
    @type _bbx: L{barIndexerStructureElement._clsBoundingBox}
    @ivar _uid: value of the 'uid' attribute of the represented element
    @type _uid: int
    @ivar _type: value of the 'type' attribute of the represented element
    @type _type: str
    @ivar _slides: 'slide' element attribute 'slidenumber' to 'slide' element
                   representation mapping for 'slide' elements associated with
                   represented 'structure' element
    @type _slides: {int: L{barIndexerSlideElement}, ...}
    """
    # XML element name and the attribute names serialised into it
    _elementName = 'structure'
    _elementAttributes = ['name', 'bbx', 'type', 'uid']
    def __init__(self, name, bbx, uid, type = None, slideList = None):
        """
        @param name: value of the 'name' attribute of the represented element
        @type name: str
        @param bbx: value of the 'bbx' attribute of the represented element
        @type bbx: L{barIndexerStructureElement._clsBoundingBox}
        @param uid: value of the 'uid' attribute of the represented element
        @type uid: convertible to int
        @param type: value of the 'type' attribute of the represented element
        @type type: str
        @param slideList: representations of 'slide' elements related to
                          the represented element
        @type slideList: L{barIndexerSlideElement} or [L{barIndexerSlideElement}, ...]
        """
        self.name = name
        self.bbx = bbx
        self.uid = uid
        self.type = type
        # 'slidenumber' -> slide element mapping; filled via addSlide()
        self._slides = dict()
        if slideList != None:
            self.addSlide(slideList)
    def getXMLelement(self):
        """
        @return: XML representation of the represented 'structure' element;
                 the numbers of related slides are stored as the text content
                 of a nested <slides> element
        @rtype: xml.dom.Node
        """
        structureDocument = dom.Document()
        structureDocument.encoding = base.BAR_XML_ENCODING
        structureElement = barIndexerElement.getXMLelement(self)
        slideListElement = structureDocument.createElement('slides')
        slideListText = " ".join(map(str, self.slideList))
        slideListNode = structureDocument.createTextNode(slideListText)
        slideListElement.appendChild(slideListNode)
        structureElement.appendChild(slideListElement)
        return structureElement
    @classmethod
    def fromXML(cls, domXMLElement, indexer=None):
        """
        Creates object from its (valid) xml representation.
        @type domXMLElement: xml.dom.Node
        @param domXMLElement: XML description of the C{barIndexerStructureElement}
                              object to be created.
        @type indexer: L{barIndexer}
        @param indexer: the object encapsulating returned class instance
        @return: object based on L{domXMLElement} description
        @rtype: cls
        """
        result = barIndexerElement.fromXML(domXMLElement, cls, indexer=indexer)
        # the generic parser leaves 'bbx' as a comma-separated string;
        # convert it into a bounding box object
        result.bbx = cls._clsBoundingBox(tuple(map(float, result.bbx.split(','))))
        for slides in domXMLElement.getElementsByTagName('slides'):
            for text in slides.childNodes:
                if text.nodeType == dom.Node.TEXT_NODE:
                    slideNumbers = map(int, text.data.strip().split())
                    # Python 2 eager map used for its side effect only
                    map(lambda x: result.addSlide(indexer.slides[x]), slideNumbers)
        return result
    def __getBbx(self):
        """
        @return: value of the 'bbx' attribute of the represented element
        @rtype: L{barIndexerStructureElement._clsBoundingBox}
        """
        return self._bbx
    def __setBbx(self, newValue):
        """
        Set value of the 'bbx' attribute of the represented element.
        @param newValue: value of the 'bbx' attribute of the represented element
        @type newValue: L{barIndexerStructureElement._clsBoundingBox}
        """
        self._bbx = newValue
    def __getName(self):
        """
        @return: value of the 'name' attribute of the represented element
        @rtype: str
        """
        return self._name
    def __setName(self, newValue):
        """
        Set value of the 'name' attribute of the represented element.
        @param newValue: value of the 'name' attribute of the represented element
        @type newValue: str
        """
        self._name = newValue
    def __getUid(self):
        """
        @return: value of the 'uid' attribute of the represented element
        @rtype: int
        """
        return self._uid
    def __setUid(self, newValue):
        """
        Set value of the 'uid' attribute of the represented element.
        @param newValue: value of the 'uid' attribute of the represented element
        @type newValue: convertible to int
        """
        self._uid = int(newValue)
    def __getType(self):
        """
        @return: value of the 'type' attribute of the represented element
        @rtype: str
        """
        return self._type
    def __setType(self, newValue):
        """
        Set value of the 'type' attribute of the represented element.
        @param newValue: value of the 'type' attribute of the represented element
        @type newValue: str or None
        """
        assert type(newValue) is str or type(newValue) is unicode\
               or newValue == None, "String or 'None' value expected"
        self._type = newValue
    def __getSlideList(self):
        """
        @return: an ordered list of 'slidenumber' attributes of 'slide' elements
                 related to the represented element
        @rtype: [int, ...]
        """
        return sorted(list(self._slides))
    def __setSlideList(self, newValue):
        """
        Raise ValueError.
        """
        raise ValueError, "Slide list is readonly property."
    def addSlide(self, slidesToAppend):
        """
        Assign 'slide' element(s) to the represented element.
        @param slidesToAppend: representation of 'slide' element(s) to be assigned
                               to the represented element
        @type slidesToAppend: L{barIndexerSlideElement} or [L{barIndexerSlideElement}, ...]
        """
        # a single slide (no '__getitem__') is wrapped into a one-element list
        if not hasattr(slidesToAppend, '__getitem__'):
            slidesToAppend = [slidesToAppend]
        self._slides.update((x.slidenumber, x) for x in slidesToAppend)
    def __getSlideSpan(self):
        """
        @rtype: (int, int)
        @return: The lowest and the highest value of 'slidenumber' attribute of
                 'slide' elements related to represented element
        """
        return ( min(self.slideList), max(self.slideList) )
    def __setSlideSpan(self, newValue):
        """
        Raise ValueError.
        """
        raise ValueError, "Slide span is readonly property."
    name = property(__getName, __setName)
    """
    Value of the 'name' attribute of the represented element.
    @type: str
    """
    bbx = property(__getBbx, __setBbx)
    """
    Value of the 'bbx' attribute of the represented element.
    @type: L{barIndexerStructureElement._clsBoundingBox}
    """
    uid = property(__getUid, __setUid)
    """
    Value of the 'uid' attribute of the represented element.
    @type: int
    """
    type = property(__getType, __setType)
    """
    Value of the 'type' attribute of the represented element.
    @type: str
    """
    slideList= property(__getSlideList, __setSlideList)
    """
    Ordered list of 'slidenumber' attributes of 'slide' elements related
    to the represented element
    Read-only property.
    @type: [int, ...]
    """
    slideSpan= property(__getSlideSpan, __setSlideSpan)
    """
    Slide span (the lowest and the highest value of 'slidenumber' attribute
    of 'slide' elements related to represented element).
    Read-only property.
    @type: (int, int)
    """
class barIndexer(barIndexerObject):
"""
Class of objects representing whole CAF dataset index.
The class provides also methods to create CAF dataset index de novo.
It is assumed that structure names are unique and it is not necessary to introduce
    another unique ID. However, a kind of UID (unique ID) is introduced inside
the index. The UID is used to create complex structure hierarchy.
Please note that this class operates in two ways when creating CAF dataset index:
1. First stage is collecting information about slides and structures.
When slide is parsed, module extracts all information about slide number,
spatial coordinates etc.
2. Second stage is generating XML representation of stored data.
XML file is generated and saved.
@note: Be advised that hierarchy and mappings should be assigned in defined
order:
1. Indexing all slides
2. Defining hierarchy (using L{createFlatHierarchy<createFlatHierarchy>}
or L{setParentsFromFile<setParentsFromFile>}),
3. Then assigning full name mapping using e.g. using
L{self.fullNameMapping<self.fullNameMapping>} or
L{setNameMappingFromFile<setNameMappingFromFile>},
4. Assigning color mapping using
L{self.colorMapping<self.colorMapping>} or
L{setColorMappingFromFile<setColorMappingFromFile>}.
@group color_mappings: *ColorMapping*
@group fullname_mappings: *NameMapping*
@group hierarchy: *Hierarchy*
@todo: Handle utf-8 structure names somehow...
@cvar _requiredInternalData: names of required CAF dataset properties
@type _requiredInternalData: [str, ...]
@cvar _elementsList: names of XML elements partitioning CAF dataset index file
@type _elementsList: [str, ...]
@cvar _initialIDs: initial values of UID/GID sequences
@type _initialIDs: {str: int}
@cvar _indexerElement: parental class for classes of objects representing
encapsulated elements
@type _indexerElement: class
@cvar _propertyElement: class of objects representing 'property' elements
@type _propertyElement: class
@cvar _groupElement: class of objects representing 'group' elements
@type _groupElement: class
@cvar _slideElement: class of objects representing 'slide' elements
@type _slideElement: class
@cvar _structureElement: class of objects representing 'structure' elements
@type _structureElement: class
@ivar _uid: current value of UID sequence
@type _uid: int
@ivar _gid: current value of GID sequence
@type _gid: int
@ivar _hierarchyGroups: hierarchy group name to object representing related
'group' element mapping
@type _hierarchyGroups: {str : L{_groupElement}, ...}
@ivar _slides: CAF slide number to object representing related 'slide' element
mapping
@type _slides: {int : L{_slideElement}, ...}
@ivar _properties: CAF dataset property name to object representing related
'property' element mapping
@type _properties: {str : L{_propertyElement}, ...}
@ivar _structures: structure name to object representing related 'structure'
element mapping
@type _structures: {str : L{_structureElement}, ...}
@ivar _fullNameMapping: cached hierarchy group name to full name mapping
@type _fullNameMapping: {str : str}
@ivar _colorMapping: cached hierarchy group name to colour mapping dictionary
@type _colorMapping: {str : str}
    @ivar _hierarchyRootElementName: name of the superior group of the hierarchy
                                     gathering all structures
@type _hierarchyRootElementName: str
@ivar cafDirectory: path to the directory where the CAF dataset index file
is located
@type cafDirectory: str
"""
_requiredInternalData = ['ReferenceWidth', 'ReferenceHeight',
'FilenameTemplate', 'RefCords', 'CAFName', 'CAFComment',
'CAFCreator', 'CAFCreatorEmail', 'CAFCompilationTime',
'CAFSlideUnits', 'CAFFullName', 'CAFAxesOrientation']
_elementsList = ['slideindex',
'atlasproperties',
'slidedetails',
'structureslist',
'hierarchy']
_initialIDs = {'uid': 100000,
'gid': 200000}
_indexerElement = barIndexerElement
_propertyElement = barIndexerPropertyElement
_groupElement = barIndexerGroupElement
_slideElement = barIndexerSlideElement
_structureElement = barIndexerStructureElement
    def __init__(self, hierarchyRootElementName=CONF_HIERARCHY_ROOT_NAME):
        """
        @type hierarchyRootElementName: str
        @param hierarchyRootElementName: Name of the root element of the
                                         hierarchy. The root element is
                                         the superior group of the hierarchy
                                         gathering all structures.
                                         L{CONF_HIERARCHY_ROOT_NAME} by default.
        """
        self._uid = self._initialIDs['uid'] # Initial number for UID generation
        self._gid = self._initialIDs['gid'] # Initial number for GID generation
        # Define empty structure hierarchy tree:
        self._hierarchyGroups = {}
        # Define placeholder for structures
        self._slides = {}
        self._properties = {}
        self._structures = {}
        # lazily-computed caches of the group name -> fullname and
        # group name -> colour mappings; filled on first access
        self._fullNameMapping = None #Ultimately dict
        self._colorMapping = None #Ultimately dict
        self._hierarchyRootElementName = hierarchyRootElementName
        # CAF dataset location
        self.cafDirectory = None
    def __fromXML(cls, sourceXMLElement):
        """
        Create a CAF dataset index object from its XML representation.
        @param sourceXMLElement: source of the XML representation: a path to
                                 an existing file, an XML string, a DOM node
                                 or a file-like object
        @type sourceXMLElement: str, unicode, xml.dom.Node or file-like object
        @return: index object based on the L{sourceXMLElement} representation
        @rtype: cls
        @note: takes C{cls} as its first argument - presumably wrapped as
               a classmethod outside this fragment (TODO: confirm).
        """
        cafdirectory = None
        # Argument type checking
        if type(sourceXMLElement) is str or type(sourceXMLElement) is unicode:
            if os.path.exists(sourceXMLElement):
                # sourceXMLElement is a valid file path
                slideindexElement = dom.parse(sourceXMLElement)
                cafdirectory = os.path.dirname(sourceXMLElement)
            else:
                # sourceXMLElement is assumed to be an XML string
                slideindexElement = dom.parseString(sourceXMLElement)
        elif isinstance(sourceXMLElement, dom.Node):
            # sourceXMLElement is a dom.Node
            slideindexElement = sourceXMLElement
        elif hasattr(sourceXMLElement, 'read'):
            # sourceXMLElement is an object similar to a file object
            xmlString = sourceXMLElement.read()
            slideindexElement = dom.parseString(xmlString)
        else:
            # unsupported sourceXMLElement type
            raise TypeError, "Bad type of sourceXMLElement argument."
        # fetch the four top-level section elements of the index document
        # (the first entry of _elementsList is the document root itself)
        (propertiesElement,
         slidedetailsElement,
         structurelistElement,
         hierarchyElement) =\
                 map(lambda x: slideindexElement.getElementsByTagName(x)[0],
                     cls._elementsList[1:])
        result = cls()
        result.cafDirectory = cafdirectory
        # the mainloop list contains tuples
        # (sourceXMLElement, elementClass, destination). Each tuple
        # controls an iteration of the following for loop - the destination
        # dictionary is being filled with instances of the elementClass class,
        # that were based on children of the sourceXMLElement dom XML node
        mainloop = [(propertiesElement, cls._propertyElement,
                     result._properties),
                    (slidedetailsElement, cls._slideElement,
                     result._slides),
                    (structurelistElement, cls._structureElement,
                     result._structures),
                    (hierarchyElement, cls._groupElement,
                     result._hierarchyGroups)]
        for (sourceXMLElement, elementClass, destination) in mainloop:
            for xmlElement in sourceXMLElement.childNodes:
                if xmlElement.nodeType == dom.Node.ELEMENT_NODE and\
                   xmlElement.tagName == elementClass._elementName:
                    #print cls.__name__, elementClass.__name__
                    newElement = elementClass.fromXML(xmlElement, indexer=result)
                    # warning - may violate encapsulation of barIndexerPropertyElement
                    destination[newElement.name] = newElement
        # According to CAF specification, the <hierarchy> XML node contains one
        # <group> node, that is the root element of the hierarchy tree.
        (result.hierarchyRootElementName, group) =\
                result._hierarchyGroups.items()[0]
        # The group hierarchy tree contains important information about
        # color mapping, fullname mapping, hierarchy and UID/GID maximum value.
        # The information has to be copied to the result object.
        result.__addGroup(group)
        result.__normaliseIDs()
        return result
    def normaliseIDs(self):
        """
        An alias for C{self.L{__normaliseIDs}()}.
        """
        return self.__normaliseIDs()
    def __normaliseIDs(self):
        """
        Normalise ID and UID of groups and structures.
        Assigns fresh, consecutive IDs (taken from the GID/UID generators)
        to every group and structure, processing them in the order of their
        names.  If a duplicated ID is encountered, falls back to assigning
        fresh IDs purely by name order; ambiguous names abort with KeyError.
        """
        # reset ID generators
        (self._gid, self._uid) = map(lambda x: self._initialIDs[x], ['gid', 'uid'])
        # list of tuples (attrName, idGenerator, src, dst) controlling iteration of top-level for loop.
        # attrName is the name of object ID attribute to be normalised, idGenerator - ID generator
        # for barIndexer object, src - list of objects containing every ID value in the object,
        # dst - list of lists of objects to be normalised with uniformed ID values
        mainloop = [('id', lambda: self.gid, self._hierarchyGroups),
                    #            [self._hierarchyGroups]),
                    ('uid', lambda: self.uid, self._structures)]#,
                    #            [self._structures, self._hierarchyGroups])]
        for (attrName, idGenerator, src) in mainloop:
            # oldVals - redundant list of all ID values in the object ordered by the name of group
            # (or structure) containing it
            oldVals = (getattr(y, attrName) for y in\
                       sorted((x for x in src.itervalues() if hasattr(x, attrName)\
                               and getattr(x, attrName) != None),
                              key=lambda x: x.name))
            try:
                # newVals - old ID to new ID mapping dictionary
                newVals = {}
                # because of ID generator the order of execution is important!
                for val in oldVals:
                    if val in newVals:
                        # ID is ambiguous
                        print >>sys.stderr, "ID = %d is ambigous, trying to fix it" % val
                        raise KeyError
                    newVals[val] = idGenerator()
                # iterate an iterator over objects in dst updating ID attributes
                # of the objects according to newVal dictionary
                for x in src.itervalues():
                    if hasattr(x, attrName) and getattr(x, attrName) != None:
                        setattr(x, attrName, newVals[getattr(x, attrName)])
            except KeyError:
                # an attempt of IDs disambiguation
                # reset ID generators
                (self._gid, self._uid) = map(lambda x: self._initialIDs[x], ['gid', 'uid'])
                names = set()
                elementList = sorted((x for x in src.itervalues() if hasattr(x, attrName)\
                                      and getattr(x, attrName) != None),
                                     key=lambda x: x.name)
                for element in elementList:
                    if element.name in names:
                        print element.name, "is also ambigous"
                        raise KeyError, "name %s is ambiguous" % element.name
                    names.add(element.name)
                    if hasattr(element, attrName) and getattr(element, attrName) != None:
                        setattr(element, attrName, idGenerator())
def __getGID(self):
"""
@return: value of the next element of the GID sequence
@rtype: int
"""
self._gid += 1
return self._gid
def __setGID(self, val):
"""
Increase the value of the current element of the GID sequence.
@type val: int
@param val: a new value of the current element of the GID sequence
@note: L{val} must be greater than the value of the current element
of the GID sequence.
"""
assert self._gid <= val, "gid can not be reduced"
self._gid = max(self._gid, val)
def __getUID(self):
"""
@return: value of the next element of the UID sequence
@rtype: int
"""
self._uid += 1
return self._uid
def __setUID(self, val):
"""
Increase the value of the current element of the UID sequence.
@type val: int
@param val: a new value of the current element of the UID sequence.
@note: L{val} must be greater than the value of the current element
of the UID sequence.
"""
assert self._uid <= val, "uid can not be reduced"
self._uid = max(self._uid, val)
def __indexStructures(self, structure, slide):
"""
Index provided structure with slide. If structure already exists
in index only new path are appended to it, otherwise new index entry
for this structure is created.
@type structure: L{barGenericStructure}
@param structure: structure to index
@type slide: L{_slideElement}
@param slide: slide to be indexed with the structure
"""
# If given structure exists:
if structure.name not in self._structures:
self._structures[structure.name] =\
self._structureElement(\
structure.name,
self._clsBoundingBox(structure.bbx),
self.uid,
slideList = slide)
# Otherwise:
else:
self._structures[structure.name].addSlide(slide)
self._structures[structure.name].bbx+=self._clsBoundingBox(structure.bbx)
def __getFullNameMapping(self):
"""
@return: hierarchy group name to full name mapping
@rtype: {str : str}
@note: Result of the method is cached in L{_fullNameMapping}.
"""
if self._fullNameMapping == None:
self._fullNameMapping =\
dict(map(lambda (k, v): (k, v.fullname), self.groups.iteritems()))
return self._fullNameMapping
def __setFullNameMapping(self, sourceDictionary=None, dummyNameElement="------"):
"""
Assign full names to the hierarchy groups. Update cached hierarchy
group name to full name mapping.
@param sourceDictionary: hierarchy group name to full name assignment
@type sourceDictionary: {str : str}
@param dummyNameElement: full name assigned to groups not included in
L{sourceDictionary}
@type dummyNameElement: str
@note: Full name mapping has to be assigned after creating strucutre
hierarchy - either flat or structured.
"""
#TODO: Provide documentation
if sourceDictionary != None:
self._fullNameMapping = sourceDictionary
else:
sourceDictionary = self.fullNameMapping
for group in self._hierarchyGroups.values():
# Assign fullname mapping. If given group element has no fullname
# use the goup name as the fullname
group.fullname = sourceDictionary.get(group.name, group.name)
def setNameMappingFromFile(self, filename, nameCol=0, fullNameCol=1):
"""
Assign full names to the hierarchy groups. Update cached hierarchy
group name to full name mapping.
@type filename: str
@param filename: path to the file containing name to full name assignment
@type nameCol: int
@param nameCol: column containing names
@type fullNameCol: int
@param fullNameCol: column containing full names
"""
fullNameDictionary =\
base.getDictionaryFromFile(filename, nameCol, fullNameCol)
self.fullNameMapping = fullNameDictionary
    def __setHierarchyRoot(self, hierarchyRootElementName):
        """
        Define the name of the superior group of the CAF dataset structures hierarchy.
        @param hierarchyRootElementName: name of the superior group of the CAF
                                         dataset structures hierarchy
        @type hierarchyRootElementName: str
        """
        self._hierarchyRootElementName = hierarchyRootElementName
    def __getHierarchyRoot(self):
        """
        @return: name of the superior group of the CAF dataset structures hierarchy
        @rtype: str
        """
        return self._hierarchyRootElementName
def __getColorMapping(self):
"""
@return: hierarchy group name to colour mapping
@rtype: {str : str}
@note: Result of the method is cached in L{_colorMapping}.
"""
if self._colorMapping == None:
self._colorMapping =\
dict(map(lambda (k, v): (k, v.fill), self.groups.iteritems()))
return self._colorMapping
def __setColorMapping(self, sourceDictionary=None, dummyElementColor="#777777"):
"""
Assign colours to the hierarchy groups. Update cached hierarchy
group name to colour mapping.
@param sourceDictionary: hierarchy group name to colour assignment
@type sourceDictionary: {str : str}
@param dummyElementColor: colour assigned to groups not included in
L{sourceDictionary}
@type dummyElementColor: str
@note: Color mapping has to be assigned after creating strucutre
hierarchy - either flat or structured.
"""
if sourceDictionary != None:
self._colorMapping = sourceDictionary
else:
sourceDictionary = self.colorMapping
# Iterate over all group structures and try to assign colors to
# every structure. If there is no mapping strutc => color, apply dummy
# color
for group in self._hierarchyGroups.values():
newCol = sourceDictionary.get(group.name, dummyElementColor)
if newCol.startswith("#"):
group.fill = newCol
else:
group.fill = "#" + newCol
def setColorMappingFromFile(self, colorFilename, nameCol = 0, colourCol = 1):
"""
Assign colours to the hierarchy groups. Update cached hierarchy
group name to colour mapping.
@type colorFilename: str
@param colorFilename: path to the file containing name to colour assignment
@type nameCol: int
@param nameCol: column containing names
@type colourCol: int
@param colourCol: column containing colour
"""
ColorFillDictionary =\
base.getDictionaryFromFile(colorFilename, nameCol, colourCol)
self.colorMapping = ColorFillDictionary
def fixOrphanStructures(self, parentGrpName = None):
"""
Search for structures not covered by hierarchy, then bind them to
the requested hierarchy group.
@param parentGrpName: name of group element which orphan structures
are binded to; if C{None} - orphan structures are
binded directly to hierarchy root element
@type parentGrpName: str
@note: If hierarchy group of name L{parentGrpName} does not exist it
is created and binded directly to hierarchy root element.
"""
# None parent means that orphans will be binded to hierarhy root
if parentGrpName == None:
parentGrpName = self._hierarchyRootElementName
elif parentGrpName not in self._hierarchyGroups:
# Chceck if parent node exist, create it if not. Newly created group
# will have hierarchy root as its parent.
self._hierarchyGroups[parentGrpName] =\
self._groupElement(name=parentGrpName, id=self.gid)
self._setHierarchyRelation(parentGrpName, self._hierarchyRootElementName)
# Extract names of orphan structures
orphans = [name for name in self.structures\
if not name in self._hierarchyGroups]
# Fore every orphan create corresponding structure and bind it to the
# parent group element
for structureName in orphans:
group = self._groupElement(\
name=structureName,
id=self.gid,
structure=self._structures[structureName])
self._hierarchyGroups[structureName] = group
self._setHierarchyRelation(structureName, parentGrpName)
def _setHierarchyRelation(self, child, parent):
"""
Create parent - child relation between two hierarchy groups.
@type child: str
@param child: Name of the child element
@type parent: str
@param parent: Name of the parent element
"""
parentGroup = self._hierarchyGroups[parent]
childGroup = self._hierarchyGroups[child]
parentGroup.children.append(childGroup)
childGroup.parent = parentGroup
def __setHierarchy(self, sourceDictionary):
"""
Create structure hierarchy according to provided relation.
@type sourceDictionary: {str : str}
@param sourceDictionary: children to parent mapping
@todo: Implement hierarchy validation.
@attention: the method destroys all existing elements attributes!
"""
#TODO: Implement hierarhy validataion:
if not self.__validateHierarchy(sourceDictionary): return
# Clear existing hierarchy:
if len(self._hierarchyGroups):
del self._hierarchyGroups
self._hierarchyGroups = {}
# Define unique list of hierarchy elements basing on provided dict
uniqeNames = list(set(base.flatten(sourceDictionary.items())))
#Create index groups items for each structure
for groupName in uniqeNames:
if self._structures.has_key(groupName):
structure = self._structures[groupName]
else:
structure = None
self._hierarchyGroups[groupName]=\
self._groupElement(name=groupName, id=self.gid,
structure=structure)
# Bind all group elements into hierarchy by assigning child - parent
# relation
for (child, parent) in sourceDictionary.items():
self._setHierarchyRelation(child, parent)
def __getHierarchy(self):
"""
@return: tuple-based hierarchy tree
@rtype: tuple
"""
brainRoot = self._hierarchyRootElementName
return self._hierarchyGroups[brainRoot].getChildList(depth=999)
def __validateHierarchy(self, sourceDictionary):
"""
Just a stub.
@todo: Implementation.
"""
return True
#TODO: Implement this
pass
def createFlatHierarchy(self):
"""
Create flat hierary: gather all component structures under common
superior L{hierarchyRootElementName} hierarchy group element.
No external ontology tree is required.
@note: This method has to be inveoked AFTER indexing all slides.
"""
# Extract all names of structures from self._structures
listOfStructs = self._structures.keys()
rootStruct = self._hierarchyRootElementName
flatHierDict ={}
# Create dictionary by assigning rootStruct to every structure found.
map(lambda x: flatHierDict.__setitem__(x, rootStruct), listOfStructs)
# Create actual hierarchy from flat dictionary
self.__setHierarchy(flatHierDict)
def setParentsFromFile(self, hierarchyFilename, childCol = 0, parentCol = 1):
"""
Create structure hierarchy according to relation provided in the file.
@type hierarchyFilename: str
@param hierarchyFilename: path to the child name to parent name mapping
file
@type childCol: int
@param childCol: column containing child names
@type parentCol: int
@param parentCol: column containing parent names
"""
cpDictionary =\
base.getDictionaryFromFile(hierarchyFilename, childCol, parentCol)
self.hierarchy = cpDictionary
def _validateInternalData(self):
"""
Perform internal indexed properties validation. If invalid - raise ValueError.
"""
# Iterate over list of required attributes and checks if they are
# defined. If an attribute is not defined, raise an exception.
for dataElement in self._requiredInternalData:
if not self._properties.has_key(dataElement):
raise ValueError(\
"Required index property not provided: %s.",\
(dataElement,))
def indexSingleSlide(self, tracedSlide, slideNumber):
"""
Register given CAF slide to the CAF dataset index.
@type slideNumber: int
@param slideNumber: slide number
@type tracedSlide: L{base.barTracedSlide}
@param tracedSlide: CAF slide representation
"""
print >>sys.stderr, "Indexer: indexing slide %d" % (slideNumber,)
slide = self._slideElement(\
tracedSlide.metadata[base.BAR_BREGMA_METADATA_TAGNAME].value,
slideNumber,
tracedSlide.metadata[base.BAR_TRAMAT_METADATA_TAGNAME].value)
# Iterate over all structures in given slide and create index entry for
# each of the structure:
map(lambda struct: self.__indexStructures(struct, slide),\
tracedSlide.values())
# Extract metadata from the slide
self._slides[slideNumber] = slide
def getXMLelement(self):
"""
@return: XML representation of represented CAF dataset index
@rtype: xml.dom.Document
"""
# Check if all required properties are assigned
self._validateInternalData()
self.__normaliseIDs()
indexerDocument = dom.Document()
indexerDocument.encoding=base.BAR_XML_ENCODING
(slideindexElement,
propertiesElement,
slidedetailsElement,
structurelistElement,
hierarchyElement) =\
map(lambda x: indexerDocument.createElement(x),
self._elementsList)
indexerDocument.appendChild(slideindexElement)
slideindexElement.appendChild(propertiesElement)
slideindexElement.appendChild(slidedetailsElement)
slideindexElement.appendChild(structurelistElement)
slideindexElement.appendChild(hierarchyElement)
for (name, slide) in sorted(self._slides.items()):
slidedetailsElement.appendChild(slide.getXMLelement())
for (name, structure) in sorted(self._structures.items()):
structurelistElement.appendChild(structure.getXMLelement())
for (name, property) in sorted(self._properties.items()):
propertiesElement.appendChild(property.getXMLelement())
hierarchyElement.appendChild(self._hierarchyGroups[self._hierarchyRootElementName].getXMLelement())
return indexerDocument
def __addGroup(self, group, parent=None):
"""
Parse the hierarchy group (sub)tree and update C{self.L{_hierarchyGroups}}
according to information in the tree nodes; update information about
parental nodes in the nodes of the tree.
@type group: L{barIndexerGroupElement}
@param group: hierarchy group (sub)tree to be parsed
@type parent: L{barIndexerGroupElement}
@param parent: parental node for the L{group} subtree
"""
name = group.name
self._hierarchyGroups[name] = group
group.parent = parent
for grp in group.children:
self.__addGroup(grp, group)
if name in self._structures:
group.structure = self._structures[name]
def visibleGroups(self, depth = 999, leavesOnly = False):
"""
An alias for C{self.L{groups}[self.L{hierarchyRootElementName}].L{getVisibleGroupIterator<barIndexerGroupElement.getVisibleGroupIterator>}()}.
"""
group = self.hierarchyRootElementName
return self.groups[group].getVisibleGroupIterator(depth = depth,
leavesOnly = leavesOnly)
def unfoldSubtrees(self, rootStructures, defaultDepth=0, leavesOnly=False):
"""
@param rootStructures: names of root elements of hierarchy subtrees or
pairs of root element name and depth of the subtree
@type rootStructures: iterable([str | (str, int), ...])
@param defaultDepth: the default depth of hierarchy subtrees
@type defaultDepth: int
@param leavesOnly: indicates if only the leaf nodes has to be returned
@type leavesOnly: bool
@return: names of hierarchy subtree tree nodes related to any structure
present in CAF slides
@rtype: set([str, ...])
"""
def unfoldSubtree(arg):
# check the argument type
if type(arg) is tuple:
root, depth = arg
else:
root, depth = arg, defaultDepth
group = self.groups[root]
return set(x.name\
for x in group.getVisibleGroupIterator(depth = depth,
leavesOnly=leavesOnly))
return reduce(lambda x, y: x | y, (unfoldSubtree(z) for z in rootStructures))
def __getProperty(self):
    """
    Getter backing the read-only C{properties} property.

    @return: CAF dataset index property name to 'property' element
             representation mapping
    @rtype: {str : L{barIndexerPropertyElement}}
    @note: be aware that it is not a copy, but the original dictionary!
    """
    return self._properties
def clearProperties(self):
    """
    Remove all CAF dataset index properties.

    Rebinds C{_properties} to a fresh dictionary (does not clear the old
    dictionary in place).
    """
    self._properties = {}
def updateProperties(self, propsDict):
    """
    Updates properties of the indexer with the data from
    provided dictionary.

    @type propsDict: dict
    @param propsDict: dictionary holding indexer's properties in which keys
                      are names of the properties.

    @return: None
    """
    # Wrap every (name, value) pair into a property element and merge the
    # result into the current property mapping (existing keys overwritten).
    self._properties.update(
        (name, self._propertyElement(name, value))
        for name, value in propsDict.items())
def __setProperty(self, newValue):
    """
    Setter guard for the read-only C{properties} property.

    @raise ValueError: always; the property cannot be assigned to
    """
    # 'raise ValueError, msg' is Python-2-only syntax; the call form below
    # behaves identically and also parses under Python 3.
    raise ValueError("'Properties' is readonly property.")
def __setSlides(self, newValue):
    """
    Setter guard for the read-only C{slides} property.

    @raise ValueError: always; the property cannot be assigned to
    """
    # Python-2-only 'raise X, msg' replaced with the portable call form.
    raise ValueError("Slides is readonly property.")
def __getSlides(self):
    """
    Getter backing the read-only C{slides} property.

    @return: slidenumber to 'slide' element representation mapping
    @rtype: {int : L{barIndexerSlideElement}}
    @note: be aware that it is not a copy, but the original dictionary!
    """
    return self._slides
def __setStructures(self, newValue):
    """
    Setter guard for the read-only C{structures} property.

    @raise ValueError: always; the property cannot be assigned to
    """
    # Python-2-only 'raise X, msg' replaced with the portable call form.
    raise ValueError("Structures is readonly property.")
def __getStructures(self):
    """
    Getter backing the read-only C{structures} property.

    @return: name to 'structure' element representation mapping
    @rtype: {str : L{barIndexerStructureElement}}
    @note: be aware that it is not a copy, but the original dictionary!
    """
    return self._structures
def __getUidList(self):
    """
    For each of hierarchy groups find UIDs of structures assigned to it.

    Example usage:
      1. Filter out elements which may be reconstructed:
         C{[name for (name, uid) in self.uidList.iteritems() if uid]}
      2. Filter out elements that cannot be reconstructed as they don't
         have children with paths assigned and they do not have uid
         assigned itself:
         C{[name for (name, uid) in self.uidList.iteritems() if not uid]}

    @rtype: {str : [int, ...]}
    @return: hierarchy group name to UIDs of assigned structures mapping
    """
    mapping = {}
    for group in self._hierarchyGroups.values():
        mapping[group.name] = group.uidList
    return mapping
def __setUidList(self, newList):
    """
    Setter guard for the read-only C{uidList} property.

    @raise ValueError: always; the property cannot be assigned to
    """
    # Python-2-only 'raise X, msg' replaced with the portable call form.
    raise ValueError("uidList is read only property.")
def __getHierarchyGroups(self):
    """
    Getter backing the read-only C{groups} property.

    @rtype: {str : L{barIndexerGroupElement}}
    @return: name to 'group' element representation mapping
    @note: be aware that it is not a copy, but the original dictionary!
    """
    return self._hierarchyGroups
def __setHierarchyGroups(self, newValue):
    """
    Setter guard for the read-only C{groups} property.

    @raise ValueError: always; the property cannot be assigned to
    """
    # Python-2-only 'raise X, msg' replaced with the portable call form.
    raise ValueError("Read only property.")
fromXML = classmethod(__fromXML)
"""
L{__fromXML} method bound to the class as its classmethod.
"""
_fromXML = staticmethod(__fromXML)
"""
L{__fromXML} method not bound to the class nor to the class instance.
"""
hierarchy = property(__getHierarchy, __setHierarchy)
"""
Hierarchy of the CAF dataset structures.
@type: non consistent
"""
hierarchyRootElementName = property(__getHierarchyRoot, __setHierarchyRoot)
"""
Name of the superior group of the CAF dataset structures hierarhy.
@type: str
"""
groups = property(__getHierarchyGroups, __setHierarchyGroups)
"""
Name to 'group' element representation mapping.
Read-only property.
@type: {str : L{barIndexerGroupElement}}
"""
properties = property(__getProperty, __setProperty)
"""
Name to 'property' element representation mapping.
Read-only property.
@type: {str : L{barIndexerPropertyElement}}
"""
slides = property(__getSlides, __setSlides)
"""
Slide number to 'slide' element representation mapping.
Read-only property.
@type: {str : L{barIndexerSlideElement}}
"""
structures = property(__getStructures, __setStructures)
"""
Name to 'structure' element representation mapping.
Read-only property.
@type: {str : L{barIndexerGroupElement}}
"""
colorMapping = property(__getColorMapping, __setColorMapping)
"""
Hierarchy group name to colour mapping.
@type: {str : str}
"""
fullNameMapping = property(__getFullNameMapping, __setFullNameMapping)
"""
Hierarchy group name to fullname mapping.
@type: {str : str}
"""
uid = property(__getUID, __setUID)
"""
If read - he value of the next element of the UID sequence.
If write - the value of the current element of the sequence.
The value of the current element of the sequence can be only increased.
@type: int
"""
gid = property(__getGID, __setGID)
"""
If read - the value of the next element of the GID sequence.
If write - the value of the current element of the sequence.
The value of the current element of the sequence can be only increased.
@type: int
@note: The property can be only increased.
"""
uidList = property(__getUidList, __setUidList)
"""
Hierarchy group name to UIDs of assigned structures mapping.
Read-only property.
@type: {str : [int, ...]}
"""
# Module is import-only; nothing to do when executed directly.
if __name__=='__main__':
    pass
| pmajka/3dbar | lib/pymodules/python2.6/bar/atlas_indexer.py | Python | gpl-3.0 | 71,984 |
"""Ncpol2SDPA
=====
Provides
1. A converter from a polynomial optimization problems of commuting and
noncommuting variables to a semidefinite programming relaxation.
2. Helper functions to define physics problems.
"""
__version__ = "1.11.1"
from .faacets_relaxation import FaacetsRelaxation
from .sdp_relaxation import SdpRelaxation
from .steering_hierarchy import SteeringHierarchy
from .moroder_hierarchy import MoroderHierarchy
from .rdm_hierarchy import RdmHierarchy
from .nc_utils import generate_operators, generate_variables, get_monomials, \
flatten
from .sdpa_utils import read_sdpa_out
from .physics_utils import bosonic_constraints, fermionic_constraints, \
pauli_constraints, get_neighbors, get_next_neighbors, correlator, \
generate_measurements, projective_measurement_constraints, \
maximum_violation, define_objective_with_I, Probability
__all__ = ['SdpRelaxation',
'SteeringHierarchy',
'MoroderHierarchy',
'FaacetsRelaxation',
'RdmHierarchy',
'generate_operators',
'generate_variables',
'bosonic_constraints',
'fermionic_constraints',
'projective_measurement_constraints',
'correlator',
'maximum_violation',
'generate_measurements',
'define_objective_with_I',
'Probability',
'flatten',
'get_monomials',
'get_neighbors',
'get_next_neighbors',
'read_sdpa_out',
'pauli_constraints']
| cgogolin/ncpol2sdpa | ncpol2sdpa/__init__.py | Python | gpl-3.0 | 1,560 |
from os.path import dirname, realpath
from jinja2 import Environment, FileSystemLoader
from google.appengine.ext import ndb
DEBUG = True
SECRET_KEY = 'asdfjasdflkjsfewi23kjl3kjl45kjl56jk6hjb76vsjsa'
CONFIG = {
}
SRC_ROOT = dirname(realpath(__file__))
JINJA_ENVIRONMENT = Environment(
loader=FileSystemLoader(SRC_ROOT),
extensions=['jinja2.ext.autoescape'],
autoescape=True,
)
REGIONS = ['NSW', 'VIC', 'QLD', 'WA', 'SA', 'TAS', 'ACT', 'NT']
PARENT_KEY = ndb.Key('daddy', 'oz') | Tjorriemorrie/housing | src/settings.py | Python | mit | 496 |
'''
Created by auto_sdk on 2015.09.11
'''
from top.api.base import RestApi
class OpenimTribeDismissRequest(RestApi):
    """TOP API request wrapper for 'taobao.openim.tribe.dismiss'."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; the caller fills these before submitting.
        self.user = None
        self.tribe_id = None

    def getapiname(self):
        """Return the TOP gateway method name for this request."""
        return 'taobao.openim.tribe.dismiss'
| Akagi201/pycsc | top/api/rest/OpenimTribeDismissRequest.py | Python | mit | 328 |
""" Routines to support optional packages """
# Feature-detect nose once at import time; have_nose gates the test-skip
# behaviour of the setup_module callables returned by optional_package().
try:
    import nose
except ImportError:
    have_nose = False
else:
    have_nose = True
from .tripwire import TripWire
def optional_package(name, trip_msg=None):
    """ Return package-like thing and module setup for package `name`

    Parameters
    ----------
    name : str
        package name
    trip_msg : None or str
        message to give when someone tries to use the return package, but we
        could not import it, and have returned a TripWire object instead.
        Default message if None.

    Returns
    -------
    pkg_like : module or ``TripWire`` instance
        If we can import the package, return it. Otherwise return an object
        raising an error when accessed
    have_pkg : bool
        True if import for package was successful, false otherwise
    module_setup : function
        callable usually set as ``setup_module`` in calling namespace, to allow
        skipping tests.
    """
    # fromlist=[''] makes __import__ hand back the (sub)module itself
    # rather than the top-level package (see help(__import__)).
    module = None
    try:
        module = __import__(name, fromlist=[''])
    except ImportError:
        pass
    if module is not None:
        # Import succeeded: a no-op module setup is sufficient.
        return module, True, lambda: None
    # Import failed: hand back a tripwire that raises on any access, plus
    # a module setup that skips tests when nose is available.
    if trip_msg is None:
        trip_msg = ('We need package %s for these functions, but '
                    '``import %s`` raised an ImportError'
                    % (name, name))
    broken = TripWire(trip_msg)

    def setup_module():
        if have_nose:
            raise nose.plugins.skip.SkipTest('No %s for these tests'
                                             % name)
    return broken, False, setup_module
| ME-ICA/me-ica | meica.libs/nibabel/optpkg.py | Python | lgpl-2.1 | 2,576 |
# Copyright 2013. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import application
import unittest
from application import application
from flask import Flask, current_app, request, Response
""" Main test cases for our application """
class AppTestCase(unittest.TestCase):
#application = Flask(__name__)
def setUp(self):
application.testing = True
with application.app_context():
self.client = current_app.test_client()
def test_load_config(self):
""" Test that we can load our config properly """
self.assertTrue(1)
def test_get_test(self):
""" Test hitting /test and that we get a correct HTTP response """
self.assertTrue(1)
def test_get_form(self):
""" Test that we can get a signup form """
self.assertTrue(1)
def test_get_user(self):
""" Test that we can get a user context """
self.assertTrue(1)
def test_login(self):
""" Test that we can authenticate as a user """
self.assertTrue(1)
def test_test(self):
""" Test that we can create a new testr """
self.assertTrue(1)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| mdj924/py-flask-signup | tests/application-tests.py | Python | apache-2.0 | 1,745 |
# test brenda-node by generating small text files
# as stand-ins for real frames
import os, sys, time, optparse
parser = optparse.OptionParser()
parser.add_option("-o", "--out", dest="out",
                  help="output file")
parser.add_option("-p", "--pause", type="int", dest="pause", default=1,
                  help="Pause delay per 'frame', default=%default")
parser.add_option("-s", "--start", type="int", dest="start",
                  help="start frame")
parser.add_option("-e", "--end", type="int", dest="end",
                  help="end frame")
parser.add_option("-j", "--step", type="int", dest="step",
                  help="frame increment")
( opts, args ) = parser.parse_args()
# generate test tmpfile
# NOTE(review): relies on the TMP environment variable being set (typical
# on Windows only); raises KeyError elsewhere -- confirm target platforms.
fn = os.path.join(os.environ['TMP'], "info.tmp")
with open(fn, 'w') as f:
    f.write("start=%d end=%d step=%d pause=%d out=%s\n" % (opts.start, opts.end, opts.step, opts.pause, opts.out))
# Emit one small text file per "frame" (end frame inclusive), pausing
# between frames to mimic render time. Python 2 script (print statement,
# xrange).
for i in xrange(opts.start, opts.end+1, opts.step):
    # generate fake frame; '######' in the output template is replaced by
    # the zero-padded frame number.
    fn = opts.out.replace("######", "%06d") % (i,) + '.txt'
    print fn
    with open(fn, 'w') as f:
        f.write("This is a test, frame #%d\n" % (i,))
    time.sleep(opts.pause)
| jamesyonan/brenda | test/perframe.py | Python | gpl-3.0 | 1,198 |
#!/usr/bin/python
"""
Diaphora, a diffing plugin for IDA
Copyright (c) 2015, Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
KNOWN BUGS:
[ ] The choosers aren't updated when importing stuff.
TODO (for future versions):
[ ] Heuristics based on the call graph. This is why BinDiff was/is the
best one.
[ ] Instruction-level comment porting.
[ ] Import all names (global variables, etc...).
"""
import os
import sys
import time
import json
import decimal
import difflib
import sqlite3
import traceback
from hashlib import md5
from cStringIO import StringIO
from difflib import SequenceMatcher, HtmlDiff
from pygments import highlight
from pygments.lexers import NasmLexer, CppLexer
from pygments.formatters import HtmlFormatter
from idc import *
from idaapi import *
from idautils import *
if IDA_SDK_VERSION < 690:
# In versions prior to IDA 6.9 PySide is used...
from PySide import QtGui
QtWidgets = QtGui
is_pyqt5 = False
else:
# ...while in IDA 6.9, they switched to PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
is_pyqt5 = True
from others.tarjan_sort import strongly_connected_components, robust_topological_sort
from jkutils.kfuzzy import CKoretFuzzyHashing
from jkutils.factor import (FACTORS_CACHE, difference, difference_ratio,
primesbelow as primes)
#-----------------------------------------------------------------------
VERSION_VALUE = "1.0.8"
COPYRIGHT_VALUE="Copyright(c) 2015 Joxean Koret"
COMMENT_VALUE="Diaphora diffing plugin for IDA version %s" % VERSION_VALUE
# Constants unexported in IDA Python
PRTYPE_SEMI=0x0008
# Used to clean-up the pseudo-code and assembly dumps in order to get
# better comparison ratios
CMP_REPS = ["loc_", "sub_", "qword_", "dword_", "byte_", "word_", "off_",
"unk_", "stru_", "dbl_", "locret_", "short"]
CMP_REMS = ["dword ptr ", "byte ptr ", "word ptr ", "qword ptr ", "short ptr"]
# Messages
MSG_RELAXED_RATIO_ENABLED = """AUTOHIDE DATABASE\n<b>Relaxed ratio calculations</b> will be enabled. It will ignore many small
modifications to functions and will match more functions with higher ratios. Enable this option if you're only interested in the
new functionality. Disable it for patch diffing if you're interested in small modifications (like buffer sizes).
<br><br>
This is automatically done for diffing big databases (more than 20,000 functions in the database).<br><br>
You can disable it by un-checking the 'Relaxed calculations of differences ratios' option."""
MSG_FUNCTION_SUMMARIES_ONLY = """AUTOHIDE DATABASE\n<b>Do not export basic blocks or instructions</b> will be enabled.<br>
It will not export the information relative to basic blocks or<br>
instructions and 'Diff assembly in a graph' will not be available.
<br><br>
This is automatically done for exporting huge databases with<br>
more than 100,000 functions.<br><br>
You can disable it by un-checking the 'Do not export basic blocks<br>
or instructions' option."""
#-----------------------------------------------------------------------
def log(msg):
    """Write a timestamped line to IDA's output window."""
    stamped = "[%s] %s\n" % (time.asctime(), msg)
    Message(stamped)
#-----------------------------------------------------------------------
def log_refresh(msg, show=False):
    """Display msg in IDA's wait box (creating it when show is True,
    otherwise replacing the current text) and also log it."""
    box_updater = show_wait_box if show else replace_wait_box
    box_updater(msg)
    log(msg)
#-----------------------------------------------------------------------
def quick_ratio(buf1, buf2):
    """Return an upper-bound similarity ratio of two texts, compared
    line-wise with difflib's SequenceMatcher.quick_ratio().

    Returns 0 for None/empty inputs or on any internal error.
    """
    try:
        # Bug fix: the original tested 'buf1 == ""' twice and never buf2,
        # so an empty second buffer slipped through to SequenceMatcher.
        if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
            return 0
        s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
        return s.quick_ratio()
    except Exception:
        # Call-form print works on both Python 2 and 3; narrowed the bare
        # except so KeyboardInterrupt/SystemExit are not swallowed.
        print("quick_ratio: %s" % str(sys.exc_info()[1]))
        return 0
#-----------------------------------------------------------------------
def real_quick_ratio(buf1, buf2):
    """Return the cheapest upper-bound similarity ratio of two texts,
    compared line-wise with difflib's SequenceMatcher.real_quick_ratio().

    Returns 0 for None/empty inputs or on any internal error.
    """
    try:
        # Bug fix: the original tested 'buf1 == ""' twice and never buf2,
        # so an empty second buffer slipped through to SequenceMatcher.
        if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
            return 0
        s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
        return s.real_quick_ratio()
    except Exception:
        # Call-form print works on both Python 2 and 3; narrowed the bare
        # except so KeyboardInterrupt/SystemExit are not swallowed.
        print("real_quick_ratio: %s" % str(sys.exc_info()[1]))
        return 0
#-----------------------------------------------------------------------
def ast_ratio(ast1, ast2):
    """Similarity ratio between two AST prime-product signatures.

    Identical signatures (including both None) yield 1.0; exactly one
    missing signature yields 0; otherwise delegate to difference_ratio()
    over arbitrary-precision decimals.
    """
    if ast1 == ast2:
        return 1.0
    if ast1 is None or ast2 is None:
        return 0
    return difference_ratio(decimal.Decimal(ast1), decimal.Decimal(ast2))
#-----------------------------------------------------------------------
class CHtmlViewer(PluginForm):
    """Dockable IDA form rendering an HTML string in a read-only browser."""

    def OnCreate(self, form):
        # Convert the IDA form to the widget toolkit used by this IDA
        # version (PyQt5 on >= 6.9, PySide before).
        if is_pyqt5:
            self.parent = self.FormToPyQtWidget(form)
        else:
            self.parent = self.FormToPySideWidget(form)
        self.PopulateForm()
        # NOTE(review): these rebinds run *after* PopulateForm() assigned
        # the widgets; they stay alive because Qt's parent layout keeps
        # references, but the ordering looks unintentional -- confirm.
        self.browser = None
        self.layout = None
        return 1

    def PopulateForm(self):
        self.layout = QtWidgets.QVBoxLayout()
        self.browser = QtWidgets.QTextBrowser()
        # Commented for now
        #self.browser.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
        self.browser.setHtml(self.text)
        self.browser.setReadOnly(True)
        self.browser.setFontWeight(12)
        self.layout.addWidget(self.browser)
        self.parent.setLayout(self.layout)

    def Show(self, text, title):
        # Store the HTML before PluginForm.Show triggers OnCreate.
        self.text = text
        return PluginForm.Show(self, title)
#-----------------------------------------------------------------------
class CChooser(Choose2):
    """IDA chooser (results list) used to display Diaphora diffing results
    and to host the right-click import/diff commands."""

    class Item:
        """One result row: a match between two functions, or a single
        unmatched function when ea2/name2 are left as None."""

        def __init__(self, ea, name, ea2 = None, name2 = None, desc="100% equal", ratio = 0):
            self.ea = ea
            self.vfname = name
            self.ea2 = ea2
            self.vfname2 = name2
            self.description = desc
            self.ratio = ratio
            self.cmd_import_selected = None
            self.cmd_import_all = None
            self.cmd_import_all_funcs = None

        def __str__(self):
            return '%08x' % self.ea

    def __init__(self, title, bindiff, show_commands=True):
        # Unmatched choosers have fewer columns than match choosers.
        if title.startswith("Unmatched in"):
            Choose2.__init__(self, title, [ ["Line", 8], ["Address", 10], ["Name", 20] ], Choose2.CH_MULTI)
        else:
            Choose2.__init__(self, title, [ ["Line", 8], ["Address", 10], ["Name", 20], ["Address 2", 10], ["Name 2", 20], ["Ratio", 5], ["Description", 30] ], Choose2.CH_MULTI)
        # 'primary' controls whether double-click jumps in this IDB or
        # shows the other database's assembly.
        if title == "Unmatched in primary":
            self.primary = False
        else:
            self.primary = True
        self.n = 0
        self.items = []
        self.icon = 41
        self.bindiff = bindiff
        self.show_commands = show_commands
        # Context-menu command identifiers, populated lazily by show().
        self.cmd_diff_asm = None
        self.cmd_diff_graph = None
        self.cmd_diff_c = None
        self.cmd_import_selected = None
        self.cmd_import_all = None
        self.cmd_import_all_funcs = None
        self.cmd_show_asm = None
        self.cmd_show_pseudo = None
        self.cmd_highlight_functions = None
        self.cmd_unhighlight_functions = None
        self.selected_items = []

    def OnClose(self):
        """space holder"""
        return True

    def OnEditLine(self, n):
        """space holder"""

    def OnInsertLine(self):
        pass

    def OnSelectLine(self, n):
        item = self.items[int(n)]
        if self.primary:
            try:
                jump_ea = int(item[1], 16)
                # Only jump for valid addresses
                if isEnabled(jump_ea):
                    jumpto(jump_ea)
            except:
                print "OnSelectLine", sys.exc_info()[1]
        else:
            self.bindiff.show_asm(self.items[n], self.primary)

    def OnGetLine(self, n):
        try:
            return self.items[n]
        except:
            print "OnGetLine", sys.exc_info()[1]

    def OnGetSize(self):
        return len(self.items)

    def OnDeleteLine(self, n):
        try:
            del self.items[n]
        except:
            pass
        return True

    def OnRefresh(self, n):
        return n

    def add_item(self, item):
        # Rows are stored as lists of strings matching the columns declared
        # in __init__; addresses are rendered as 8-digit hex.
        if self.title.startswith("Unmatched in"):
            self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname])
        else:
            self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname, "%08x" % int(item.ea2), item.vfname2, "%.3f" % item.ratio, item.description])
        self.n += 1

    def show(self, force=False):
        # Display the chooser and (once, or again when force=True) register
        # the right-click commands appropriate for this chooser kind.
        t = self.Show()
        if t < 0:
            return False
        if self.show_commands and (self.cmd_diff_asm is None or force):
            # create aditional actions handlers
            self.cmd_diff_asm = self.AddCommand("Diff assembly")
            self.cmd_diff_c = self.AddCommand("Diff pseudo-code")
            self.cmd_diff_graph = self.AddCommand("Diff assembly in a graph")
            self.cmd_import_selected = self.AddCommand("Import selected")
            self.cmd_import_all = self.AddCommand("Import *all* functions")
            self.cmd_import_all_funcs = self.AddCommand("Import *all* data for sub_* functions")
            self.cmd_highlight_functions = self.AddCommand("Highlight matches")
            self.cmd_unhighlight_functions = self.AddCommand("Unhighlight matches")
            self.cmd_save_results = self.AddCommand("Save diffing results")
        elif not self.show_commands and (self.cmd_show_asm is None or force):
            self.cmd_show_asm = self.AddCommand("Show assembly")
            self.cmd_show_pseudo = self.AddCommand("Show pseudo-code")
        return True

    def get_color(self):
        # Background colour used by the "Highlight matches" command,
        # chosen by the reliability class encoded in the chooser title.
        if self.title.startswith("Best"):
            return 0xffff99
        elif self.title.startswith("Partial"):
            return 0x99ff99
        elif self.title.startswith("Unreliable"):
            return 0x9999ff

    def OnCommand(self, n, cmd_id):
        # Aditional right-click-menu commands handles
        if cmd_id == self.cmd_import_all:
            if askyn_c(1, "HIDECANCEL\nDo you really want to import all matched functions, comments, prototypes and definitions?") == 1:
                self.bindiff.import_all(self.items)
        elif cmd_id == self.cmd_import_all_funcs:
            if askyn_c(1, "HIDECANCEL\nDo you really want to import all IDA named matched functions, comments, prototypes and definitions?") == 1:
                self.bindiff.import_all_auto(self.items)
        elif cmd_id == self.cmd_import_selected:
            if len(self.selected_items) <= 1:
                self.bindiff.import_one(self.items[n])
            else:
                if askyn_c(1, "HIDECANCEL\nDo you really want to import all selected IDA named matched functions, comments, prototypes and definitions?") == 1:
                    self.bindiff.import_selected(self.items, self.selected_items)
        elif cmd_id == self.cmd_diff_c:
            self.bindiff.show_pseudo_diff(self.items[n])
        elif cmd_id == self.cmd_diff_asm:
            self.bindiff.show_asm_diff(self.items[n])
        elif cmd_id == self.cmd_show_asm:
            self.bindiff.show_asm(self.items[n], self.primary)
        elif cmd_id == self.cmd_show_pseudo:
            self.bindiff.show_pseudo(self.items[n], self.primary)
        elif cmd_id == self.cmd_highlight_functions:
            if askyn_c(1, "HIDECANCEL\nDo you want to change the background color of each matched function?") == 1:
                color = self.get_color()
                for item in self.items:
                    ea = int(item[1], 16)
                    if not SetColor(ea, CIC_FUNC, color):
                        print "Error setting color for %x" % ea
                Refresh()
        elif cmd_id == self.cmd_unhighlight_functions:
            for item in self.items:
                ea = int(item[1], 16)
                if not SetColor(ea, CIC_FUNC, 0xFFFFFF):
                    print "Error setting color for %x" % ea
            Refresh()
        elif cmd_id == self.cmd_diff_graph:
            item = self.items[n]
            ea1 = int(item[1], 16)
            name1 = item[2]
            ea2 = int(item[3], 16)
            name2 = item[4]
            log("Diff graph for 0x%x - 0x%x" % (ea1, ea2))
            self.bindiff.graph_diff(ea1, name1, ea2, name2)
        elif cmd_id == self.cmd_save_results:
            filename = AskFile(1, "*.diaphora", "Select the file to store diffing results")
            if filename is not None:
                self.bindiff.save_results(filename)
        return True

    def OnSelectionChange(self, sel_list):
        self.selected_items = sel_list

    def OnGetLineAttr(self, n):
        # Shade matched rows from red (low ratio) to green (high ratio).
        if not self.title.startswith("Unmatched"):
            item = self.items[n]
            ratio = float(item[5])
            red = int(255 * (1 - ratio))
            green = int(128 * ratio)
            color = int("0x00%02x%02x" % (green, red), 16)
            return [color, 0]
        return [0xFFFFFF, 0]
#-----------------------------------------------------------------------
class CBinDiffExporterSetup(Form):
    """Options dialog shown before exporting/diffing (IDA Form wrapper)."""

    def __init__(self):
        # NOTE(review): this template drives IDA's Form layout engine and
        # its whitespace is significant -- verify rendering after any edit.
        s = r"""Diaphora BinDiff
Please select the path to the SQLite database to save the current IDA database and the path of the SQLite database to diff against.
If no SQLite diff database is selected, it will just export the current IDA database to SQLite format. Leave the 2nd field empty if you are
exporting the first database.
SQLite databases: Export filter limits:
<#Select a file to export the current IDA database to SQLite format#Export IDA database to SQLite :{iFileSave}> <#Minimum address to find functions to export#From address:{iMinEA}>
<#Select the SQLite database to diff against #SQLite database to diff against:{iFileOpen}> <#Maximum address to find functions to export#To address :{iMaxEA}>
<Use the decompiler if available:{rUseDecompiler}>
<#Enable if you want neither sub_* functions nor library functions to be exported#Export only non-IDA generated functions:{rNonIdaSubs}>
<#Export only function summaries, not all instructions. Showing differences in a graph between functions will not be available.#Do not export instructions and basic blocks:{rFuncSummariesOnly}>
<Use probably unreliable methods:{rUnreliable}>
<Recommended to disable with databases with more than 5.000 functions#Use slow heuristics:{rSlowHeuristics}>
<#Enable this option if you aren't interested in small changes#Relaxed calculations of differences ratios:{rRelaxRatio}>
<Use experimental heuristics:{rExperimental}>
<#Enable this option to ignore sub_* names for the 'Same name' heuristic.#Ignore automatically generated names:{rIgnoreSubNames}>
<#Enable this option to ignore all function names for the 'Same name' heuristic.#Ignore all function names:{rIgnoreAllNames}>
<#Enable this option to ignore thunk functions, nullsubs, etc....#Ignore small functions:{rIgnoreSmallFunctions}>{cGroup1}>
NOTE: Don't select IDA database files (.IDB, .I64) as only SQLite databases are considered.
"""
        args = {'iFileSave': Form.FileInput(save=True, swidth=40),
                'iFileOpen': Form.FileInput(open=True, swidth=40),
                'iMinEA': Form.NumericInput(tp=Form.FT_HEX, swidth=22),
                'iMaxEA': Form.NumericInput(tp=Form.FT_HEX, swidth=22),
                'cGroup1' : Form.ChkGroupControl(("rUseDecompiler",
                                                  "rUnreliable",
                                                  "rNonIdaSubs",
                                                  "rSlowHeuristics",
                                                  "rRelaxRatio",
                                                  "rExperimental",
                                                  "rFuncSummariesOnly",
                                                  "rIgnoreSubNames",
                                                  "rIgnoreAllNames",
                                                  "rIgnoreSmallFunctions"))}
        Form.__init__(self, s, args)

    def set_options(self, opts):
        # Copy a BinDiffOptions instance into the form controls.
        if opts.file_out is not None:
            self.iFileSave.value = opts.file_out
        if opts.file_in is not None:
            self.iFileOpen.value = opts.file_in
        self.rUseDecompiler.checked = opts.use_decompiler
        self.rUnreliable.checked = opts.unreliable
        self.rSlowHeuristics.checked = opts.slow
        self.rRelaxRatio.checked = opts.relax
        self.rExperimental.checked = opts.experimental
        self.iMinEA.value = opts.min_ea
        self.iMaxEA.value = opts.max_ea
        # The checkbox is the *negation* of the ida_subs option.
        self.rNonIdaSubs.checked = opts.ida_subs == False
        self.rIgnoreSubNames.checked = opts.ignore_sub_names
        self.rIgnoreAllNames.checked = opts.ignore_all_names
        self.rIgnoreSmallFunctions.checked = opts.ignore_small_functions
        self.rFuncSummariesOnly.checked = opts.func_summaries_only

    def get_options(self):
        # Read the form controls back into a BinDiffOptions instance.
        opts = dict(
            file_out = self.iFileSave.value,
            file_in = self.iFileOpen.value,
            use_decompiler = self.rUseDecompiler.checked,
            unreliable = self.rUnreliable.checked,
            slow = self.rSlowHeuristics.checked,
            relax = self.rRelaxRatio.checked,
            experimental = self.rExperimental.checked,
            min_ea = self.iMinEA.value,
            max_ea = self.iMaxEA.value,
            ida_subs = self.rNonIdaSubs.checked == False,
            ignore_sub_names = self.rIgnoreSubNames.checked,
            ignore_all_names = self.rIgnoreAllNames.checked,
            ignore_small_functions = self.rIgnoreSmallFunctions.checked,
            func_summaries_only = self.rFuncSummariesOnly.checked
        )
        return BinDiffOptions(**opts)
#-----------------------------------------------------------------------
try:
    class CAstVisitor(ctree_visitor_t):
        """Hex-Rays ctree visitor computing a prime-product fingerprint of
        the decompiled AST: every visited expression/instruction opcode
        multiplies primes_hash by a distinct prime, so the result is
        insensitive to traversal order."""

        def __init__(self, cfunc):
            self.primes = primes(4096)
            ctree_visitor_t.__init__(self, CV_FAST)
            self.cfunc = cfunc
            self.primes_hash = 1
            return

        def visit_expr(self, expr):
            try:
                self.primes_hash *= self.primes[expr.op]
            except:
                traceback.print_exc()
            return 0

        def visit_insn(self, ins):
            try:
                self.primes_hash *= self.primes[ins.op]
            except:
                traceback.print_exc()
            return 0
except:
    # It seems it may cause "problems" with trial versions... may be it
    # causes problems too with versions without the decompiler???
    class CAstVisitor:
        pass
#-----------------------------------------------------------------------
class timeraction_t(object):
    """One-shot IDA timer: after `interval` ms, call `func` (with `args`
    when not None) and unregister itself by returning -1."""

    def __init__(self, func, args, interval):
        self.func = func
        self.args = args
        self.interval = interval
        self.obj = idaapi.register_timer(self.interval, self)
        if self.obj is None:
            # Python-2-only 'raise X, "msg"' replaced with the call form,
            # which behaves identically and also parses on Python 3.
            raise RuntimeError("Failed to register timer")

    def __call__(self):
        # Invoked by IDA when the timer fires.
        if self.args is not None:
            self.func(self.args)
        else:
            self.func()
        # Returning -1 tells IDA to unregister the timer (one-shot).
        return -1
#-----------------------------------------------------------------------
class uitimercallback_t(object):
    """One-shot IDA timer that zooms the graph viewer `g` to fit once it
    has been shown (triggers the "GraphZoomFit" UI action)."""

    def __init__(self, g, interval):
        self.interval = interval
        self.obj = idaapi.register_timer(self.interval, self)
        if self.obj is None:
            # Python-2-only 'raise X, "msg"' replaced with the call form,
            # which behaves identically and also parses on Python 3.
            raise RuntimeError("Failed to register timer")
        self.g = g

    def __call__(self):
        if not "GetTForm" in dir(self.g):
            #log("Notice: IDA 6.6 doesn't support GetTForm, as so, it isn't possible to change the zoom.")
            return -1
        f = self.g.GetTForm()
        switchto_tform(f, 1)
        process_ui_action("GraphZoomFit", 0)
        # Returning -1 unregisters the timer (one-shot).
        return -1
#-----------------------------------------------------------------------
class CDiffGraphViewer(GraphViewer):
    """Graph viewer painting one side of an assembly diff, colouring each
    basic block according to the supplied colours mapping."""

    def __init__(self, title, g, colours):
        try:
            GraphViewer.__init__(self, title, False)
            # g is a 2-tuple: ({block_ea : rows}, {block_ea : [successor_eas]}).
            self.graph = g[0]
            self.relations = g[1]
            self.nodes = {}
            self.colours = colours
        except:
            Warning("CDiffGraphViewer: OnInit!!! " + str(sys.exc_info()[1]))

    def OnRefresh(self):
        try:
            self.Clear()
            self.nodes = {}
            for key in self.graph:
                self.nodes[key] = self.AddNode([key, self.graph[key]])
            for key in self.relations:
                # Create placeholder nodes for addresses that only appear
                # in the edge list but carry no instruction rows.
                if not key in self.nodes:
                    self.nodes[key] = self.AddNode([key, [[0, 0, ""]]])
                parent_node = self.nodes[key]
                for child in self.relations[key]:
                    if not child in self.nodes:
                        self.nodes[child] = self.AddNode([child, [[0, 0, ""]]])
                    child_node = self.nodes[child]
                    self.AddEdge(parent_node, child_node)
            return True
        except:
            print "GraphViewer Error:", sys.exc_info()[1]
            return True

    def OnGetText(self, node_id):
        try:
            ea, rows = self[node_id]
            # Unmatched/uncoloured blocks default to a white background.
            if ea in self.colours:
                colour = self.colours[ea]
            else:
                colour = 0xFFFFFF
            ret = []
            for row in rows:
                ret.append(row[2])
            label = "\n".join(ret)
            return (label, colour)
        except:
            print "GraphViewer.OnGetText:", sys.exc_info()[1]
            return ("ERROR", 0x000000)

    def Show(self):
        return GraphViewer.Show(self)
#-----------------------------------------------------------------------
# Active diffing session (set by the diffing entry point), or None.
g_bindiff = None

def show_choosers():
    """Re-display the result choosers of the active diffing session."""
    global g_bindiff
    if g_bindiff is None:
        return
    g_bindiff.show_choosers(True)
#-----------------------------------------------------------------------
def save_results():
    """Ask for a filename and store the active session's diffing results."""
    global g_bindiff
    if g_bindiff is None:
        return
    filename = AskFile(1, "*.diaphora", "Select the file to store diffing results")
    if filename is not None:
        g_bindiff.save_results(filename)
#-----------------------------------------------------------------------
def load_results():
    """Prompt for a .diaphora file and load it into a throwaway in-memory
    diffing object."""
    loader = CBinDiff(":memory:")
    filename = AskFile(0, "*.diaphora", "Select the file to load diffing results")
    if filename is None:
        return
    loader.load_results(filename)
#-----------------------------------------------------------------------
def import_definitions():
    """Prompt for an exported SQLite database and, after confirmation,
    import all its structures, unions and enumerations."""
    importer = CBinDiff(":memory:")
    filename = AskFile(0, "*.sqlite", "Select the file to import structures, unions and enumerations from")
    if filename is None:
        return
    if askyn_c(1, "HIDECANCEL\nDo you really want to import all structures, unions and enumerations?") == 1:
        importer.import_definitions_only(filename)
#-----------------------------------------------------------------------
# Safety limits for the diffing heuristics; CBinDiff scales these per
# every 20,000 functions in the database.
MAX_PROCESSED_ROWS = 1000000
TIMEOUT_LIMIT = 60 * 3
#-----------------------------------------------------------------------
# Fix for people using IDASkins with very h4x0r $tYl3z like the
# Consonance color scheme
# (Overrides difflib.HtmlDiff's module-level CSS so the side-by-side diff
# always renders black-on-white, regardless of the active IDA theme.)
HtmlDiff._styles = """
table.diff {
font-family:Courier;
border:medium;
background-color:#ffffff;
color:#000000
}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
#-----------------------------------------------------------------------
class CBinDiff:
  def __init__(self, db_name):
    """Create a differ bound to the SQLite database at `db_name`.

    `db_name` may be ":memory:" for throw-away instances that are only
    used to load results or import definitions.
    """
    self.names = dict(Names())
    self.primes = primes(2048*2048)
    self.db_name = db_name
    self.db = None
    self.open_db()
    self.matched1 = set()
    self.matched2 = set()
    self.total_functions1 = None
    self.total_functions2 = None
    self.equal_callgraph = False
    self.kfh = CKoretFuzzyHashing()
    # With this block size we're sure it will only apply to functions
    # somehow big
    self.kfh.bsize = 32
    self.pseudo = {}
    self.pseudo_hash = {}
    self.unreliable = False
    self.relaxed_ratio = False
    self.experimental = False
    self.slow_heuristics = False
    self.use_decompiler_always = True

    # Result choosers (list windows); created lazily elsewhere.
    self.best_chooser = None
    self.partial_chooser = None
    self.unreliable_chooser = None
    self.unmatched_second = None
    self.unmatched_primary = None

    self.last_diff_db = None

    ####################################################################
    # LIMITS
    #
    # Do not run heuristics for more than 3 minutes per each 20.000
    # functions.
    self.timeout = TIMEOUT_LIMIT
    # It's typical in SQL queries to get a cartesian product of the
    # results in the functions tables. Do not process more than this
    # value per each 20k functions.
    self.max_processed_rows = MAX_PROCESSED_ROWS
    # Limits to filter the functions to export
    self.min_ea = MinEA()
    self.max_ea = MaxEA()
    # Export only non IDA automatically generated function names? I.e.,
    # excluding these starting with sub_*
    self.ida_subs = True
    # Export only function summaries instead of also exporting both the
    # basic blocks and all instructions used by functions?
    self.function_summaries_only = False
    # Ignore IDA's automatically generated sub_* names for heuristics
    # like the 'Same name'?
    self.ignore_sub_names = True
    # Ignore any and all function names for the 'Same name' heuristic?
    self.ignore_all_names = True
    # Ignore small functions?
    self.ignore_small_functions = False
    ####################################################################
####################################################################
def __del__(self):
if self.db is not None:
try:
if self.last_diff_db is not None:
with self.db.cursor():
cur.execute('detach "%s"' % self.last_diff_db)
except:
pass
self.db_close()
  def open_db(self):
    """Open (or create) the SQLite database and ensure the schema exists."""
    print "DATABASE NAME", self.db_name
    self.db = sqlite3.connect(self.db_name)
    # Return plain byte strings and name-indexable rows from queries.
    self.db.text_factory = str
    self.db.row_factory = sqlite3.Row
    self.create_schema()
  def db_cursor(self):
    """Return a new cursor, lazily (re)opening the database if needed."""
    if self.db is None:
      self.open_db()
    return self.db.cursor()
  def db_close(self):
    """Close the connection and mark it closed so db_cursor() can reopen."""
    self.db.close()
    self.db = None
def create_schema(self):
cur = self.db_cursor()
cur.execute("PRAGMA foreign_keys = ON")
sql = """ create table if not exists functions (
id integer primary key,
name varchar(255),
address text unique,
nodes integer,
edges integer,
indegree integer,
outdegree integer,
size integer,
instructions integer,
mnemonics text,
names text,
prototype text,
cyclomatic_complexity integer,
primes_value text,
comment text,
mangled_function text,
bytes_hash text,
pseudocode text,
pseudocode_lines integer,
pseudocode_hash1 text,
pseudocode_primes text,
function_flags integer,
assembly text,
prototype2 text,
pseudocode_hash2 text,
pseudocode_hash3 text,
strongly_connected integer,
loops integer,
rva text unique,
tarjan_topological_sort text,
strongly_connected_spp text,
clean_assembly text,
clean_pseudo text,
mnemonics_spp text,
switches text,
function_hash text,
bytes_sum integer) """
cur.execute(sql)
sql = """ create table if not exists program (
id integer primary key,
callgraph_primes text,
callgraph_all_primes text,
processor text,
md5sum text
) """
cur.execute(sql)
sql = """ create table if not exists program_data (
id integer primary key,
name varchar(255),
type varchar(255),
value text
)"""
cur.execute(sql)
sql = """ create table if not exists version (value text) """
cur.execute(sql)
sql = """ create table if not exists instructions (
id integer primary key,
address text unique,
disasm text,
mnemonic text,
comment1 text,
comment2 text,
name text,
type text) """
cur.execute(sql)
sql = "create index if not exists idx_instructions_address on instructions (address)"
cur.execute(sql)
sql = """ create table if not exists basic_blocks (
id integer primary key,
num integer,
address text unique)"""
cur.execute(sql)
sql = """ create table if not exists bb_relations (
id integer primary key,
parent_id integer not null references basic_blocks(id) ON DELETE CASCADE,
child_id integer not null references basic_blocks(id) ON DELETE CASCADE)"""
cur.execute(sql)
sql = "create index if not exists idx_bb_relations on bb_relations(parent_id, child_id)"
cur.execute(sql)
sql = """ create table if not exists bb_instructions (
id integer primary key,
basic_block_id integer references basic_blocks(id) on delete cascade,
instruction_id integer references instructions(id) on delete cascade)"""
cur.execute(sql)
sql = "create index if not exists idx_bb_instructions on bb_instructions (basic_block_id, instruction_id)"
cur.execute(sql)
sql = """ create table if not exists function_bblocks (
id integer primary key,
function_id integer not null references functions(id) on delete cascade,
basic_block_id integer not null references basic_blocks(id) on delete cascade)"""
cur.execute(sql)
sql = "create index if not exists id_function_blocks on function_bblocks (function_id, basic_block_id)"
cur.execute(sql)
cur.execute("select 1 from version")
row = cur.fetchone()
if not row:
cur.execute("insert into main.version values ('%s')" % VERSION_VALUE)
sql = "create index if not exists idx_assembly on functions(assembly)"
cur.execute(sql)
sql = "create index if not exists idx_bytes_hash on functions(bytes_hash)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode on functions(pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_name on functions(name)"
cur.execute(sql)
sql = "create index if not exists idx_mangled_name on functions(mangled_function)"
cur.execute(sql)
sql = "create index if not exists idx_names on functions(names)"
cur.execute(sql)
sql = "create index if not exists idx_asm_pseudo on functions(assembly, pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_nodes_edges_instructions on functions(nodes, edges, instructions)"
cur.execute(sql)
sql = "create index if not exists idx_composite1 on functions(nodes, edges, mnemonics, names, cyclomatic_complexity, prototype2, indegree, outdegree)"
cur.execute(sql)
sql = "create index if not exists idx_composite2 on functions(instructions, mnemonics, names)"
cur.execute(sql)
sql = "create index if not exists idx_composite3 on functions(nodes, edges, cyclomatic_complexity)"
cur.execute(sql)
sql = "create index if not exists idx_composite4 on functions(pseudocode_lines, pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_composite5 on functions(pseudocode_lines, pseudocode_primes)"
cur.execute(sql)
sql = "create index if not exists idx_composite6 on functions(names, mnemonics)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash1 on functions(pseudocode_hash1)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash2 on functions(pseudocode_hash2)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash3 on functions(pseudocode_hash3)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash on functions(pseudocode_hash1, pseudocode_hash2, pseudocode_hash3)"
cur.execute(sql)
sql = "create index if not exists idx_strongly_connected on functions(strongly_connected)"
cur.execute(sql)
sql = "create index if not exists idx_strongly_connected_spp on functions(strongly_connected_spp)"
cur.execute(sql)
sql = "create index if not exists idx_loops on functions(loops)"
cur.execute(sql)
sql = "create index if not exists idx_rva on functions(rva)"
cur.execute(sql)
sql = "create index if not exists idx_tarjan_topological_sort on functions(tarjan_topological_sort)"
cur.execute(sql)
sql = "create index if not exists idx_mnemonics_spp on functions(mnemonics_spp)"
cur.execute(sql)
sql = "create index if not exists idx_clean_asm on functions(clean_assembly)"
cur.execute(sql)
sql = "create index if not exists idx_clean_pseudo on functions(clean_pseudo)"
cur.execute(sql)
sql = "create index if not exists idx_switches on functions(switches)"
cur.execute(sql)
sql = "create index if not exists idx_function_hash on functions(function_hash)"
cur.execute(sql)
sql = "create index if not exists idx_bytes_sum on functions(bytes_sum)"
cur.execute(sql)
cur.close()
  def attach_database(self, diff_db):
    """Attach the secondary database under the schema name 'diff'."""
    cur = self.db_cursor()
    cur.execute('attach "%s" as diff' % diff_db)
    cur.close()
  def reinit(self, main_db, diff_db, create_choosers=True):
    """Re-initialise this differ against a new pair of databases."""
    log("Main database '%s'." % main_db)
    log("Diff database '%s'." % diff_db)

    # Re-run __init__ to reset all state, then attach the secondary DB.
    self.__init__(main_db)
    self.attach_database(diff_db)

    if create_choosers:
      self.create_choosers()
  def import_definitions_only(self, filename):
    """Import only type libraries and struct/union/enum definitions."""
    self.reinit(":memory:", filename)
    self.import_til()
    self.import_definitions()
  def load_results(self, filename):
    """Load previously saved diff results and populate the choosers.

    Returns True on success, False when the results database is
    malformed or a referenced database path cannot be resolved.
    """
    results_db = sqlite3.connect(filename)
    results_db.text_factory = str
    results_db.row_factory = sqlite3.Row

    cur = results_db.cursor()
    try:
      sql = "select main_db, diff_db, version from config"
      cur.execute(sql)
      rows = cur.fetchall()
      if len(rows) != 1:
        Warning("Malformed results database!")
        return False

      row = rows[0]
      version = row["version"]
      if version != VERSION_VALUE:
        # Version mismatch is only a warning; loading continues.
        msg = "The version of the diff results is %s and current version is %s, there can be some incompatibilities."
        Warning(msg % (version, VERSION_VALUE))

      main_db = row["main_db"]
      diff_db = row["diff_db"]
      # Ask the user to relocate databases that moved since the diff.
      if not os.path.exists(main_db):
        log("Primary database %s not found." % main_db)
        main_db = AskFile(0, main_db, "Select the primary database path")
        if main_db is None:
          return False
      if not os.path.exists(diff_db):
        diff_db = AskFile(0, main_db, "Select the secondary database path")
        if diff_db is None:
          return False

      self.reinit(main_db, diff_db)

      # Re-populate the matched-functions choosers by category.
      sql = "select * from results"
      cur.execute(sql)
      for row in cur.fetchall():
        if row["type"] == "best":
          choose = self.best_chooser
        elif row["type"] == "partial":
          choose = self.partial_chooser
        else:
          choose = self.unreliable_chooser

        ea1 = int(row["address"], 16)
        name1 = row["name"]
        ea2 = int(row["address2"], 16)
        name2 = row["name2"]
        desc = row["description"]
        ratio = float(row["ratio"])

        choose.add_item(CChooser.Item(ea1, name1, ea2, name2, desc, ratio))

      # Re-populate the unmatched-functions choosers.
      sql = "select * from unmatched"
      cur.execute(sql)
      for row in cur.fetchall():
        if row["type"] == "primary":
          choose = self.unmatched_primary
        else:
          choose = self.unmatched_second
        choose.add_item(CChooser.Item(int(row["address"], 16), row["name"]))

      log("Showing diff results.")
      self.show_choosers()
      return True
    finally:
      cur.close()
      results_db.close()

    # NOTE(review): unreachable -- every path above returns inside the try.
    return False
  def save_results(self, filename):
    """Serialise the current choosers into a standalone results database.

    Any existing file at `filename` is overwritten.
    """
    if os.path.exists(filename):
      os.remove(filename)
      log("Previous diff results '%s' removed." % filename)

    results_db = sqlite3.connect(filename)
    results_db.text_factory = str

    cur = results_db.cursor()
    try:
      sql = "create table config (main_db text, diff_db text, version text, date text)"
      cur.execute(sql)

      sql = "insert into config values (?, ?, ?, ?)"
      cur.execute(sql, (self.db_name, self.last_diff_db, VERSION_VALUE, time.asctime()))

      sql = "create table results (type, line, address, name, address2, name2, ratio, description)"
      cur.execute(sql)

      sql = "create table unmatched (type, line, address, name)"
      cur.execute(sql)

      with results_db:
        results_sql = "insert into results values (?, ?, ?, ?, ?, ?, ?, ?)"
        unmatched_sql = "insert into unmatched values (?, ?, ?, ?)"

        # Each chooser item gets its category name prepended as the
        # 'type' column so load_results() can route it back.
        for item in self.best_chooser.items:
          l = list(item)
          l.insert(0, 'best')
          cur.execute(results_sql, l)

        for item in self.partial_chooser.items:
          l = list(item)
          l.insert(0, 'partial')
          cur.execute(results_sql, l)

        for item in self.unreliable_chooser.items:
          l = list(item)
          l.insert(0, 'unreliable')
          cur.execute(results_sql, l)

        for item in self.unmatched_primary.items:
          l = list(item)
          l.insert(0, 'primary')
          cur.execute(unmatched_sql, l)

        for item in self.unmatched_second.items:
          l = list(item)
          l.insert(0, 'secondary')
          cur.execute(unmatched_sql, l)

      log("Diffing results saved in file '%s'." % filename)
    finally:
      cur.close()
      results_db.close()
  def add_program_data(self, type_name, key, value):
    """Insert one (name, type, value) row into main.program_data."""
    cur = self.db_cursor()
    sql = "insert into main.program_data (name, type, value) values (?, ?, ?)"
    values = (key, type_name, value)
    cur.execute(sql, values)
    cur.close()
  def read_function(self, f, discard=False):
    """Extract every feature Diaphora stores for the function at address `f`.

    Returns False when the function must be skipped, otherwise a large
    tuple whose elements map to the columns inserted by save_function(),
    with the basic blocks data and the basic block relations appended as
    the two final elements.
    """
    # NOTE(review): `discard` is never used in this body -- confirm
    # against callers before removing it.
    name = GetFunctionName(int(f))
    true_name = name
    demangled_name = Demangle(name, INF_SHORT_DN)
    if demangled_name is not None:
      name = demangled_name

    f = int(f)
    func = get_func(f)
    if not func:
      log("Cannot get a function object for 0x%x" % f)
      return False

    flow = FlowChart(func)
    size = 0

    if not self.ida_subs:
      # Unnamed function, ignore it...
      if name.startswith("sub_") or name.startswith("j_") or name.startswith("unknown"):
        return False

      # Already recognized runtime's function?
      flags = GetFunctionFlags(f)
      if flags & FUNC_LIB or flags == -1:
        return False

    nodes = 0
    edges = 0
    instructions = 0
    mnems = []
    dones = {}
    names = set()
    bytes_hash = []
    bytes_sum = 0
    function_hash = []
    outdegree = 0
    indegree = len(list(CodeRefsTo(f, 1)))
    assembly = {}
    basic_blocks_data = {}
    bb_relations = {}
    bb_topo_num = {}
    bb_topological = {}
    switches = []

    # Small-primes-product over the sorted mnemonic list: an
    # order-insensitive signature of the instruction mix.
    mnemonics_spp = 1
    cpu_ins_list = GetInstructionList()
    cpu_ins_list.sort()

    image_base = self.get_base_address()
    for block in flow:
      nodes += 1
      instructions_data = []
      # Addresses are stored relative to the image base so exports stay
      # comparable across different load addresses.
      block_ea = block.startEA - image_base
      idx = len(bb_topological)
      bb_topological[idx] = []
      bb_topo_num[block_ea] = idx

      for x in list(Heads(block.startEA, block.endEA)):
        mnem = GetMnem(x)
        disasm = GetDisasm(x)
        size += ItemSize(x)
        instructions += 1

        if mnem in cpu_ins_list:
          mnemonics_spp += self.primes[cpu_ins_list.index(mnem)]

        try:
          assembly[block_ea].append(disasm)
        except KeyError:
          # First instruction of a block: prefix non-entry blocks with a
          # synthetic label so the reconstructed listing reads naturally.
          if nodes == 1:
            assembly[block_ea] = [disasm]
          else:
            assembly[block_ea] = ["loc_%x:" % x, disasm]

        # bytes_hash deliberately covers only the opcode portion of each
        # instruction: operand bytes encoding addresses/immediates are
        # stripped so relocation differences don't change the hash.
        decoded_size = idaapi.decode_insn(x)
        if idaapi.cmd.Operands[0].type in [o_mem, o_imm, o_far, o_near, o_displ]:
          decoded_size -= idaapi.cmd.Operands[0].offb
        if idaapi.cmd.Operands[1].type in [o_mem, o_imm, o_far, o_near, o_displ]:
          decoded_size -= idaapi.cmd.Operands[1].offb
        if decoded_size <= 0:
          decoded_size = 1

        curr_bytes = GetManyBytes(x, decoded_size)
        if curr_bytes is None or len(curr_bytes) != decoded_size:
          log("Failed to read %d bytes at [%08x]" % (decoded_size, x))
          continue

        bytes_hash.append(curr_bytes)
        bytes_sum += sum(map(ord, curr_bytes))

        # function_hash, by contrast, covers the full instruction bytes.
        function_hash.append(GetManyBytes(x, ItemSize(x)))
        outdegree += len(list(CodeRefsFrom(x, 0)))
        mnems.append(mnem)
        op_value = GetOperandValue(x, 1)
        if op_value == BADADDR:
          op_value = GetOperandValue(x, 0)

        tmp_name = None
        if op_value != BADADDR and op_value in self.names:
          tmp_name = self.names[op_value]
          # NOTE(review): this demangles the *function's* name, not
          # `tmp_name` -- looks like it was meant to be
          # Demangle(tmp_name, INF_SHORT_DN). Confirm upstream.
          demangled_name = Demangle(name, INF_SHORT_DN)
          if demangled_name is not None:
            tmp_name = demangled_name
          if not tmp_name.startswith("sub_"):
            names.add(tmp_name)

        l = list(CodeRefsFrom(x, 0))
        if len(l) == 0:
          l = DataRefsFrom(x)

        tmp_type = None
        for ref in l:
          if ref in self.names:
            tmp_name = self.names[ref]
            tmp_type = GetType(ref)

        ins_cmt1 = GetCommentEx(x, 0)
        ins_cmt2 = GetCommentEx(x, 1)
        instructions_data.append([x - image_base, mnem, disasm, ins_cmt1, ins_cmt2, tmp_name, tmp_type])

        switch = get_switch_info_ex(x)
        if switch:
          switch_cases = switch.get_jtable_size()
          results = calc_switch_cases(x, switch)

          # It seems that IDAPython for idaq64 has some bug when reading
          # switch's cases. Do not attempt to read them if the 'cur_case'
          # returned object is not iterable.
          can_iter = False
          switch_cases_values = set()
          for idx in xrange(len(results.cases)):
            cur_case = results.cases[idx]
            if not '__iter__' in dir(cur_case):
              break
            can_iter |= True
            for cidx in xrange(len(cur_case)):
              case_id = cur_case[cidx]
              switch_cases_values.add(case_id)

          if can_iter:
            switches.append([switch_cases, list(switch_cases_values)])

      basic_blocks_data[block_ea] = instructions_data
      bb_relations[block_ea] = []

      for succ_block in block.succs():
        succ_base = succ_block.startEA - image_base
        bb_relations[block_ea].append(succ_base)
        edges += 1
        indegree += 1
        if not dones.has_key(succ_block.id):
          dones[succ_block] = 1

      for pred_block in block.preds():
        try:
          bb_relations[pred_block.startEA - image_base].append(block.startEA - image_base)
        except KeyError:
          bb_relations[pred_block.startEA - image_base] = [block.startEA - image_base]

        edges += 1
        outdegree += 1
        # NOTE(review): `succ_block` below is left over from the succs
        # loop above; it probably should be `pred_block`. Kept as-is
        # since `dones` is only used as a seen-set here.
        if not dones.has_key(succ_block.id):
          dones[succ_block] = 1

    # Second pass: build the topological adjacency on block indexes.
    for block in flow:
      block_ea = block.startEA - image_base
      for succ_block in block.succs():
        succ_base = succ_block.startEA - image_base
        bb_topological[bb_topo_num[block_ea]].append(bb_topo_num[succ_base])

    strongly_connected_spp = 0
    try:
      strongly_connected = strongly_connected_components(bb_relations)
      bb_topological = robust_topological_sort(bb_topological)
      bb_topological = json.dumps(bb_topological)
      strongly_connected_spp = 1
      for item in strongly_connected:
        val = len(item)
        if val > 1:
          strongly_connected_spp *= self.primes[val]
    except:
      # XXX: FIXME: The original implementation that we're using is
      # recursive and can fail. We really need to create our own non
      # recursive version.
      strongly_connected = []
      bb_topological = None

    loops = 0
    for sc in strongly_connected:
      if len(sc) > 1:
        loops += 1
      else:
        # A single-node SCC counts as a loop only if it points to itself.
        if sc[0] in bb_relations and sc[0] in bb_relations[sc[0]]:
          loops += 1

    asm = []
    keys = assembly.keys()
    keys.sort()

    # After sorting our the addresses of basic blocks, be sure that the
    # very first address is always the entry point, no matter at what
    # address it is.
    keys.remove(f - image_base)
    keys.insert(0, f - image_base)
    for key in keys:
      asm.extend(assembly[key])
    asm = "\n".join(asm)

    # McCabe cyclomatic complexity: E - N + 2.
    cc = edges - nodes + 2
    proto = self.guess_type(f)
    proto2 = GetType(f)
    try:
      prime = str(self.primes[cc])
    except:
      log("Cyclomatic complexity too big: 0x%x -> %d" % (f, cc))
      prime = 0

    comment = GetFunctionCmt(f, 1)
    bytes_hash = md5("".join(bytes_hash)).hexdigest()
    function_hash = md5("".join(function_hash)).hexdigest()

    function_flags = GetFunctionFlags(f)
    pseudo = None
    pseudo_hash1 = None
    pseudo_hash2 = None
    pseudo_hash3 = None
    pseudo_lines = 0
    pseudocode_primes = None
    if f in self.pseudo:
      # Decompiler output was collected elsewhere into self.pseudo.
      pseudo = "\n".join(self.pseudo[f])
      pseudo_lines = len(self.pseudo[f])
      pseudo_hash1, pseudo_hash2, pseudo_hash3 = self.kfh.hash_bytes(pseudo).split(";")
      if pseudo_hash1 == "":
        pseudo_hash1 = None
      if pseudo_hash2 == "":
        pseudo_hash2 = None
      if pseudo_hash3 == "":
        pseudo_hash3 = None
      pseudocode_primes = str(self.pseudo_hash[f])

    clean_assembly = self.get_cmp_asm_lines(asm)
    clean_pseudo = self.get_cmp_pseudo_lines(pseudo)

    rva = f - self.get_base_address()
    return (name, nodes, edges, indegree, outdegree, size, instructions, mnems, names,
            proto, cc, prime, f, comment, true_name, bytes_hash, pseudo, pseudo_lines,
            pseudo_hash1, pseudocode_primes, function_flags, asm, proto2,
            pseudo_hash2, pseudo_hash3, len(strongly_connected), loops, rva, bb_topological,
            strongly_connected_spp, clean_assembly, clean_pseudo, mnemonics_spp, switches,
            function_hash, bytes_sum,
            basic_blocks_data, bb_relations)
  def get_base_address(self):
    """Return the image base; all stored addresses are relative to it."""
    return idaapi.get_imagebase()
def get_instruction_id(self, addr):
cur = self.db_cursor()
sql = "select id from instructions where address = ?"
cur.execute(sql, (str(addr),))
row = cur.fetchone()
rowid = None
if row is not None:
rowid = row[0]
cur.close()
return rowid
def get_bb_id(self, addr):
cur = self.db_cursor()
sql = "select id from basic_blocks where address = ?"
cur.execute(sql, (str(addr),))
row = cur.fetchone()
rowid = None
if row is not None:
rowid = row[0]
cur.close()
return rowid
  def save_function(self, props):
    """Insert one function (as returned by read_function) into 'main'.

    The last two elements of `props` are the basic blocks data and the
    basic block relations; everything before maps 1:1 to columns of the
    `functions` table, in the order of the INSERT below.
    """
    # XXX: FIXME: TODO: Insert relations (xrefs) between instructions
    # too. It will allow, in the future, to create the reader for some
    # devices...
    cur = self.db_cursor()
    new_props = []
    for prop in props[:len(props)-2]:
      # XXX: Fixme! This is a hack for 64 bit architectures kernels
      if type(prop) is long and prop > 0xFFFFFFFF:
        prop = str(prop)

      # Containers are stored as JSON text.
      if type(prop) is list or type(prop) is set:
        new_props.append(json.dumps(list(prop)))
      else:
        new_props.append(prop)

    sql = """insert into main.functions (name, nodes, edges, indegree, outdegree, size,
                                    instructions, mnemonics, names, prototype,
                                    cyclomatic_complexity, primes_value, address,
                                    comment, mangled_function, bytes_hash, pseudocode,
                                    pseudocode_lines, pseudocode_hash1, pseudocode_primes,
                                    function_flags, assembly, prototype2, pseudocode_hash2,
                                    pseudocode_hash3, strongly_connected, loops, rva,
                                    tarjan_topological_sort, strongly_connected_spp,
                                    clean_assembly, clean_pseudo, mnemonics_spp, switches,
                                    function_hash, bytes_sum)
                                values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                                        ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                                        ?, ?, ?, ?, ?, ?)"""
    cur.execute(sql, new_props)
    func_id = cur.lastrowid

    if not self.function_summaries_only:
      bb_data, bb_relations = props[len(props)-2:]
      instructions_ids = {}

      sql = """insert into main.instructions (address, mnemonic, disasm, comment1, comment2, name, type)
                                 values (?, ?, ?, ?, ?, ?, ?)"""
      # Bind methods to locals: these loops run once per instruction.
      self_get_instruction_id = self.get_instruction_id
      cur_execute = cur.execute
      for key in bb_data:
        for insn in bb_data[key]:
          addr, mnem, disasm, cmt1, cmt2, name, mtype = insn
          db_id = self_get_instruction_id(str(addr))
          if db_id is None:
            cur_execute(sql, (str(addr), mnem, disasm, cmt1, cmt2, name, mtype))
            db_id = cur.lastrowid
          instructions_ids[addr] = db_id

      num = 0
      bb_ids = {}
      sql1 = "insert into main.basic_blocks (num, address) values (?, ?)"
      sql2 = "insert into main.bb_instructions (basic_block_id, instruction_id) values (?, ?)"

      self_get_bb_id = self.get_bb_id
      for key in bb_data:
        # Insert each basic block
        num += 1
        ins_ea = str(key)
        last_bb_id = self_get_bb_id(ins_ea)
        if last_bb_id is None:
          cur_execute(sql1, (num, ins_ea))
          last_bb_id = cur.lastrowid
        bb_ids[ins_ea] = last_bb_id

        # Insert relations between basic blocks and instructions
        for insn in bb_data[key]:
          ins_id = instructions_ids[insn[0]]
          cur_execute(sql2, (last_bb_id, ins_id))

      # Insert relations between basic blocks
      sql = "insert into main.bb_relations (parent_id, child_id) values (?, ?)"
      for key in bb_relations:
        for bb in bb_relations[key]:
          bb = str(bb)
          key = str(key)
          cur_execute(sql, (bb_ids[key], bb_ids[bb]))

      # And finally insert the functions to basic blocks relations
      sql = "insert into main.function_bblocks (function_id, basic_block_id) values (?, ?)"
      for key in bb_ids:
        bb_id = bb_ids[key]
        cur_execute(sql, (func_id, bb_id))

    cur.close()
  def save_callgraph(self, primes, all_primes, md5sum):
    """Store global callgraph signatures and basic program metadata."""
    cur = self.db_cursor()
    sql = "insert into main.program (callgraph_primes, callgraph_all_primes, processor, md5sum) values (?, ?, ?, ?)"
    proc = idaapi.get_idp_name()
    # A 64-bit BADADDR means we're running inside idaq64.
    if BADADDR == 0xFFFFFFFFFFFFFFFF:
      proc += "64"
    cur.execute(sql, (primes, all_primes, proc, md5sum))
    cur.close()
def GetLocalType(self, ordinal, flags):
ret = GetLocalTinfo(ordinal)
if ret is not None:
(stype, fields) = ret
if stype:
name = GetLocalTypeName(ordinal)
return idc_print_type(stype, fields, name, flags)
return ""
  def export_structures(self):
    """Store every local type (struct/union/enum) in program_data."""
    # It seems that GetMaxLocalType, sometimes, can return negative
    # numbers, according to one beta-tester. My guess is that it's a bug
    # in IDA. However, as we cannot reproduce, at least handle this
    # condition.
    local_types = GetMaxLocalType()
    if (local_types & 0x80000000) != 0:
      log("Warning: GetMaxLocalType returned a negative number (0x%x)!" % local_types)
      return

    # Local type ordinals are 1-based.
    for i in range(local_types):
      name = GetLocalTypeName(i+1)
      definition = self.GetLocalType(i+1, PRTYPE_MULTI | PRTYPE_TYPE | PRTYPE_SEMI | PRTYPE_PRAGMA)
      type_name = "struct"
      if definition.startswith("enum"):
        type_name = "enum"
      elif definition.startswith("union"):
        type_name = "union"

      # For some reason, IDA may return types in the form "__int128 unsigned";
      # we want them the right way around: "unsigned __int128".
      if name and name.find(" ") > -1:
        names = name.split(" ")
        name = names[0]
        if names[1] == "unsigned":
          name = "unsigned %s" % name

      self.add_program_data(type_name, name, definition)
def get_til_names(self):
idb_path = GetIdbPath()
filename, ext = os.path.splitext(idb_path)
til_path = "%s.til" % filename
with open(til_path, "rb") as f:
line = f.readline()
pos = line.find("Local type definitions")
if pos > -1:
tmp = line[pos+len("Local type definitions")+1:]
pos = tmp.find("\x00")
if pos > -1:
defs = tmp[:pos].split(",")
return defs
return None
def export_til(self):
til_names = self.get_til_names()
if til_names is not None:
for til in til_names:
self.add_program_data("til", til, None)
def do_export(self):
i = 0
callgraph_primes = 1
callgraph_all_primes = {}
func_list = list(Functions(self.min_ea, self.max_ea))
total_funcs = len(func_list)
t = time.time()
for func in func_list:
i += 1
if (total_funcs > 100) and i % (total_funcs/100) == 0 or i == 1:
line = "Exported %d function(s) out of %d total.\nElapsed %d:%02d:%02d second(s), remaining time ~%d:%02d:%02d"
elapsed = time.time() - t
remaining = (elapsed / i) * (total_funcs - i)
m, s = divmod(remaining, 60)
h, m = divmod(m, 60)
m_elapsed, s_elapsed = divmod(elapsed, 60)
h_elapsed, m_elapsed = divmod(m_elapsed, 60)
replace_wait_box(line % (i, total_funcs, h_elapsed, m_elapsed, s_elapsed, h, m, s))
props = self.read_function(func)
if props == False:
continue
ret = props[11]
callgraph_primes *= decimal.Decimal(ret)
try:
callgraph_all_primes[ret] += 1
except KeyError:
callgraph_all_primes[ret] = 1
self.save_function(props)
# Try to fix bug #30
if i % (total_funcs/10) == 0:
self.db.commit()
md5sum = GetInputFileMD5()
self.save_callgraph(str(callgraph_primes), json.dumps(callgraph_all_primes), md5sum)
self.export_structures()
self.export_til()
  def export(self):
    """Run do_export() inside a wait box and finalise the database."""
    try:
      show_wait_box("Exporting database")
      self.do_export()
    finally:
      hide_wait_box()

    self.db.commit()
    cur = self.db_cursor()
    # Refresh SQLite statistics so the query planner uses the new indexes.
    cur.execute("analyze")
    cur.close()
    self.db_close()
def import_til(self):
log("Importing type libraries...")
cur = self.db_cursor()
sql = "select name from diff.program_data where type = 'til'"
cur.execute(sql)
for row in cur.fetchall():
LoadTil(row[0])
cur.close()
Wait()
def get_valid_definition(self, defs):
""" Try to get a valid structure definition by removing (yes) the
invalid characters typically found in IDA's generated structs."""
ret = defs.replace("?", "_").replace("@", "_")
ret = ret.replace("$", "_").replace("#", "_")
return ret
def import_definitions(self):
cur = self.db_cursor()
sql = "select type, name, value from diff.program_data where type in ('structure', 'struct', 'enum')"
cur.execute(sql)
rows = cur.fetchall()
new_rows = set()
for row in rows:
if row[1] is None:
continue
the_name = row[1].split(" ")[0]
if GetStrucIdByName(the_name) == BADADDR:
type_name = "struct"
if row[0] == "enum":
type_name = "enum"
elif row[0] == "union":
type_name == "union"
new_rows.add(row)
ret = ParseTypes("%s %s;" % (type_name, row[1]))
if ret != 0:
pass
for i in xrange(10):
for row in new_rows:
if row[1] is None:
continue
the_name = row[1].split(" ")[0]
if GetStrucIdByName(the_name) == BADADDR and GetStrucIdByName(row[1]) == BADADDR:
definition = self.get_valid_definition(row[2])
ret = ParseTypes(definition)
if ret != 0:
pass
cur.close()
Wait()
  def import_one(self, item):
    """Import everything related to one matched function.

    `item` is a chooser row whose item[1]/item[3] are the hexadecimal
    primary/secondary addresses.
    """
    ret = askyn_c(1, "AUTOHIDE DATABASE\nDo you want to import all the type libraries, structs and enumerations?")

    if ret == 1:
      # Import all the type libraries from the diff database
      self.import_til()
      # Import all the struct and enum definitions
      self.import_definitions()
    elif ret == -1:
      # Cancelled by the user.
      return

    # Import just the selected item
    ea1 = str(int(item[1], 16))
    ea2 = str(int(item[3], 16))
    self.do_import_one(ea1, ea2, True)

    # Re-export the now-modified local function so 'main' stays in sync.
    new_func = self.read_function(str(ea1))
    self.delete_function(ea1)
    self.save_function(new_func)

    self.db.commit()
def prettify_asm(self, asm_source):
asm = []
for line in asm_source.split("\n"):
if not line.startswith("loc_"):
asm.append("\t" + line)
else:
asm.append(line)
return "\n".join(asm)
  def show_asm_diff(self, item):
    """Show an HTML side-by-side assembly diff for a matched pair.

    `item` is a chooser row whose item[1]/item[3] are the hexadecimal
    primary/secondary addresses.
    """
    cur = self.db_cursor()
    # The union's 4th column (1 = primary, 2 = secondary) fixes the row
    # order regardless of which query returns first.
    sql = """select *
               from (
             select prototype, assembly, name, 1
               from functions
              where address = ?
                and assembly is not null
     union select prototype, assembly, name, 2
               from diff.functions
              where address = ?
                and assembly is not null)
              order by 4 asc"""
    ea1 = str(int(item[1], 16))
    ea2 = str(int(item[3], 16))
    cur.execute(sql, (ea1, ea2))
    rows = cur.fetchall()
    if len(rows) != 2:
      Warning("Sorry, there is no assembly available for either the first or the second database.")
    else:
      row1 = rows[0]
      row2 = rows[1]

      html_diff = HtmlDiff()
      asm1 = self.prettify_asm(row1[1])
      asm2 = self.prettify_asm(row2[1])
      buf1 = "%s proc near\n%s\n%s endp" % (row1[2], asm1, row1[2])
      buf2 = "%s proc near\n%s\n%s endp" % (row2[2], asm2, row2[2])
      src = html_diff.make_file(buf1.split("\n"), buf2.split("\n"))

      title = "Diff assembler %s - %s" % (row1[2], row2[2])
      cdiffer = CHtmlViewer()
      cdiffer.Show(src, title)
    cur.close()
  def show_asm(self, item, primary):
    """Show syntax-highlighted assembly of one function in an HTML view.

    `primary` selects the 'main' schema, otherwise 'diff' is used.
    """
    cur = self.db_cursor()
    if primary:
      db = "main"
    else:
      db = "diff"
    ea = str(int(item[1], 16))
    sql = "select prototype, assembly, name from %s.functions where address = ?"
    sql = sql % db
    cur.execute(sql, (ea, ))
    row = cur.fetchone()
    if row is None:
      Warning("Sorry, there is no assembly available for the selected function.")
    else:
      fmt = HtmlFormatter()
      fmt.noclasses = True
      fmt.linenos = True
      asm = self.prettify_asm(row[1])
      # Prototype as a comment, then a classic proc/endp wrapper.
      final_asm = "; %s\n%s proc near\n%s\n%s endp\n"
      final_asm = final_asm % (row[0], row[2], asm, row[2])
      src = highlight(final_asm, NasmLexer(), fmt)
      title = "Assembly for %s" % row[2]
      cdiffer = CHtmlViewer()
      cdiffer.Show(src, title)
    cur.close()
def get_cmp_asm_lines(self, asm):
sio = StringIO(asm)
lines = []
get_cmp_asm = self.get_cmp_asm
for line in sio.readlines():
line = line.strip("\n")
lines.append(get_cmp_asm(line))
return "\n".join(lines)
def get_cmp_pseudo_lines(self, pseudo):
if pseudo is None:
return pseudo
# Remove all the comments
tmp = re.sub(" // .*", "", pseudo)
# Now, replace sub_, byte_, word_, dword_, loc_, etc...
for rep in CMP_REPS:
tmp = re.sub(rep + "[a-f0-9A-F]+", rep + "XXXX", tmp)
tmp = re.sub("v[0-9]+", "vXXX", tmp)
tmp = re.sub("a[0-9]+", "aXXX", tmp)
tmp = re.sub("arg_[0-9]+", "aXXX", tmp)
return tmp
def get_cmp_asm(self, asm):
if asm is None:
return asm
# Ignore the comments in the assembly dump
tmp = asm.split(";")[0]
tmp = tmp.split(" # ")[0]
# Now, replace sub_, byte_, word_, dword_, loc_, etc...
for rep in CMP_REPS:
tmp = re.sub(rep + "[a-f0-9A-F]+", "XXXX", tmp)
# Remove dword ptr, byte ptr, etc...
for rep in CMP_REMS:
tmp = re.sub(rep + "[a-f0-9A-F]+", "", tmp)
reps = ["\+[a-f0-9A-F]+h\+"]
for rep in reps:
tmp = re.sub(rep, "+XXXX+", tmp)
tmp = re.sub("\.\.[a-f0-9A-F]{8}", "XXX", tmp)
# Strip any possible remaining white-space character at the end of
# the cleaned-up instruction
tmp = re.sub("[ \t\n]+$", "", tmp)
return tmp
  def compare_graphs_pass(self, bblocks1, bblocks2, colours1, colours2, is_second = False):
    """One matching pass over the basic blocks of both functions.

    Marks perfectly matching blocks white (0xffffff) and, on the first
    pass only (is_second=False), partially matching blocks light yellow
    (0xCCffff). The colour dicts are modified in place and returned.
    """
    dones1 = set()
    dones2 = set()

    # Now compare each basic block from the first function to all the
    # basic blocks in the 2nd function
    for key1 in bblocks1:
      if key1 in dones1:
        continue

      for key2 in bblocks2:
        if key2 in dones2:
          continue

        # Same number of instructions?
        if len(bblocks1[key1]) == len(bblocks2[key2]):
          mod = False
          partial = True
          i = 0
          for ins1 in bblocks1[key1]:
            ins2 = bblocks2[key2][i]

            # Same mnemonic? The change can be only partial
            if ins1[1] != ins2[1]:
              partial = False

            # Try to compare the assembly after doing some cleaning
            cmp_asm1 = self.get_cmp_asm(ins1[2])
            cmp_asm2 = self.get_cmp_asm(ins2[2])
            if cmp_asm1 != cmp_asm2:
              mod = True
              # NOTE(review): this `continue` skips the `i += 1` below,
              # so after a non-partial difference the remaining
              # instructions compare against a stale index. Confirm
              # whether that is intentional before restructuring.
              if not partial:
                continue
            i += 1

          if not mod:
            # Perfect match, we discovered a basic block equal in both
            # functions
            colours1[key1] = 0xffffff
            colours2[key2] = 0xffffff
            dones1.add(key1)
            dones2.add(key2)
            break
          elif not is_second and partial:
            # Partial match, we discovered a basic block with the same
            # mnemonics but something changed
            #
            # NOTE:
            # Do not add the partial matches to the dones lists, as we
            # can have complete matches after a partial match!
            colours1[key1] = 0xCCffff
            colours2[key2] = 0xCCffff
            break
    return colours1, colours2
def compare_graphs(self, g1, ea1, g2, ea2):
colours1 = {}
colours2 = {}
bblocks1 = g1[0]
bblocks2 = g2[0]
# Consider, by default, all blocks added, news
for key1 in bblocks1:
colours1[key1] = 0xCCCCFF
for key2 in bblocks2:
colours2[key2] = 0xCCCCFF
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, False)
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, True)
return colours1, colours2
def graph_diff(self, ea1, name1, ea2, name2):
    """Show side-by-side coloured flow graphs for a pair of matched functions.

    Returns False when graph data is missing for either database.
    """
    # Primary graph from the "main" database, secondary from "diff".
    g1 = self.get_graph(str(ea1), True)
    g2 = self.get_graph(str(ea2))
    if g1 == ({}, {}) or g2 == ({}, {}):
        Warning("Sorry, graph information is not available for one of the databases.")
        return False
    colours = self.compare_graphs(g1, ea1, g2, ea2)
    title1 = "Graph for %s (primary)" % name1
    title2 = "Graph for %s (secondary)" % name2
    graph1 = CDiffGraphViewer(title1, g1, colours[0])
    graph2 = CDiffGraphViewer(title2, g2, colours[1])
    graph1.Show()
    graph2.Show()
    # Dock the secondary graph at the right of the primary one and force
    # both viewers to refresh shortly after being shown.
    set_dock_pos(title1, title2, DP_RIGHT)
    uitimercallback_t(graph1, 100)
    uitimercallback_t(graph2, 100)
def get_graph(self, ea1, primary=False):
    """Read the flow graph of the function at address *ea1* from the database.

    Returns (bb_blocks, bb_relations): bb_blocks maps basic-block addresses
    to lists of [address, mnemonic, disasm] instruction triplets and
    bb_relations maps parent block addresses to sets of child addresses.
    Reads the "main" schema when *primary* is True, otherwise "diff".
    """
    if primary:
        db = "main"
    else:
        db = "diff"
    cur = self.db_cursor()
    dones = set()
    # First query: every instruction of every basic block of the function.
    sql = """ select bb.address, ins.address, ins.mnemonic, ins.disasm
                from %s.function_bblocks fb,
                     %s.bb_instructions bbins,
                     %s.instructions ins,
                     %s.basic_blocks bb,
                     %s.functions f
               where ins.id = bbins.instruction_id
                 and bbins.basic_block_id = bb.id
                 and bb.id = fb.basic_block_id
                 and f.id = fb.function_id
                 and f.address = ?
               order by bb.address asc""" % (db, db, db, db, db)
    cur.execute(sql, (ea1,))
    bb_blocks = {}
    for row in cur.fetchall():
        bb_ea = str(int(row[0]))
        ins_ea = str(int(row[1]))
        mnem = row[2]
        dis = row[3]
        # Skip instructions already collected for a previous block.
        if ins_ea in dones:
            continue
        dones.add(ins_ea)
        try:
            bb_blocks[bb_ea].append([ins_ea, mnem, dis])
        except KeyError:
            bb_blocks[bb_ea] = [ [ins_ea, mnem, dis] ]
    # Second query: the parent/child relations between basic blocks.
    sql = """ select (select address
                        from %s.basic_blocks
                       where id = bbr.parent_id),
                     (select address
                        from %s.basic_blocks
                       where id = bbr.child_id)
                from %s.bb_relations bbr,
                     %s.function_bblocks fbs,
                     %s.basic_blocks bbs,
                     %s.functions f
               where f.id = fbs.function_id
                 and bbs.id = fbs.basic_block_id
                 and fbs.basic_block_id = bbr.child_id
                 and f.address = ?
               order by 1 asc, 2 asc""" % (db, db, db, db, db, db)
    cur.execute(sql, (ea1, ))
    rows = cur.fetchall()
    bb_relations = {}
    for row in rows:
        bb_ea1 = str(row[0])
        bb_ea2 = str(row[1])
        try:
            bb_relations[bb_ea1].add(bb_ea2)
        except KeyError:
            bb_relations[bb_ea1] = set([bb_ea2])
    cur.close()
    return bb_blocks, bb_relations
def show_pseudo(self, item, primary):
    """Display the stored pseudo-code for the function in chooser row *item*.

    *item[1]* is the address as a hexadecimal string; *primary* selects the
    "main" or "diff" database.
    """
    cur = self.db_cursor()
    if primary:
        db = "main"
    else:
        db = "diff"
    # Chooser rows store addresses as hexadecimal strings.
    ea = str(int(item[1], 16))
    sql = "select prototype, pseudocode, name from %s.functions where address = ?"
    sql = sql % db
    cur.execute(sql, (str(ea), ))
    row = cur.fetchone()
    if row is None or row[0] is None or row[1] is None:
        Warning("Sorry, there is no pseudo-code available for the selected function.")
    else:
        # Highlight prototype + body as C and show it in an HTML viewer.
        fmt = HtmlFormatter()
        fmt.noclasses = True
        fmt.linenos = True
        func = "%s\n%s" % (row[0], row[1])
        src = highlight(func, CppLexer(), fmt)
        title = "Pseudo-code for %s" % row[2]
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)
    cur.close()
def show_pseudo_diff(self, item):
    """Show an HTML side-by-side diff of the pseudo-code of a matched pair.

    *item* is a chooser row; item[1]/item[3] are the primary/secondary
    addresses as hexadecimal strings.
    """
    cur = self.db_cursor()
    # Fetch the primary (tag 1) and secondary (tag 2) rows, in that order.
    sql = """select *
               from (
             select prototype, pseudocode, name, 1
               from functions
              where address = ?
                and pseudocode is not null
       union select prototype, pseudocode, name, 2
               from diff.functions
              where address = ?
                and pseudocode is not null)
              order by 4 asc"""
    ea1 = str(int(item[1], 16))
    ea2 = str(int(item[3], 16))
    cur.execute(sql, (ea1, ea2))
    rows = cur.fetchall()
    if len(rows) != 2:
        Warning("Sorry, there is no pseudo-code available for either the first or the second database.")
    else:
        row1 = rows[0]
        row2 = rows[1]
        html_diff = HtmlDiff()
        # Diff "prototype\npseudocode" buffers line by line.
        buf1 = row1[0] + "\n" + row1[1]
        buf2 = row2[0] + "\n" + row2[1]
        src = html_diff.make_file(buf1.split("\n"), buf2.split("\n"))
        title = "Diff pseudo-code %s - %s" % (row1[2], row2[2])
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)
    cur.close()
def delete_function(self, ea):
    """Remove the row for the function at address *ea* from the primary DB.

    The cursor is now closed in a finally block so it is not leaked when
    the delete statement raises.
    """
    cur = self.db_cursor()
    try:
        cur.execute("delete from functions where address = ?", (ea, ))
    finally:
        cur.close()
def is_auto_generated(self, name):
    """Return True when *name* starts with a known auto-generated prefix."""
    return any(name.startswith(prefix) for prefix in CMP_REPS)
def import_instruction(self, ins_data1, ins_data2):
    """Import instruction level metadata from the diff database into IDA.

    ins_data1 holds the local instruction offset in ins_data1[0];
    ins_data2 is [ea2, cmt1, cmt2, name, type] read from the diff database.
    Existing user comments and non auto-generated names are preserved.
    """
    ea1 = self.get_base_address() + int(ins_data1[0])
    ea2, cmt1, cmt2, name, mtype = ins_data2
    # Set instruction level comments
    if cmt1 is not None and get_cmt(ea1, 0) is None:
        set_cmt(ea1, cmt1, 0)
    if cmt2 is not None and get_cmt(ea1, 1) is None:
        # BUGFIX: the repeatable comment slot must receive cmt2; it used to
        # (re-)import cmt1 here.
        set_cmt(ea1, cmt2, 1)
    tmp_ea = None
    set_type = False
    data_refs = list(DataRefsFrom(ea1))
    if len(data_refs) > 0:
        # Global variables
        tmp_ea = data_refs[0]
        if tmp_ea in self.names:
            curr_name = GetTrueName(tmp_ea)
            # Only rename auto-generated names, never user-assigned ones.
            if curr_name != name and self.is_auto_generated(curr_name):
                MakeName(tmp_ea, name)
                set_type = False
        else:
            MakeName(tmp_ea, name)
            set_type = True
    else:
        # Functions: follow the code reference (normal flow first).
        code_refs = list(CodeRefsFrom(ea1, 0))
        if len(code_refs) == 0:
            code_refs = list(CodeRefsFrom(ea1, 1))
        if len(code_refs) > 0:
            curr_name = GetTrueName(code_refs[0])
            if curr_name != name and self.is_auto_generated(curr_name):
                MakeName(code_refs[0], name)
                tmp_ea = code_refs[0]
                set_type = True
    if tmp_ea is not None and set_type:
        if mtype is not None and GetType(tmp_ea) != mtype:
            SetType(tmp_ea, mtype)
def import_instruction_level(self, ea1, ea2, cur):
    """Import instruction level data for the matched pair (ea1, ea2).

    Aligns the assembly listings of both functions with difflib.ndiff and,
    for every changed line present in both symbol maps, imports the diff
    database's comments/name/type via import_instruction().

    NOTE(review): the *cur* parameter is immediately shadowed by a fresh
    cursor which is then closed in the finally block — the caller's cursor
    is never used here; confirm whether the parameter can be dropped.
    """
    cur = self.db_cursor()
    try:
        # Check first if we have any importable items
        sql = """ select ins.address ea, ins.disasm dis, ins.comment1 cmt1, ins.comment2 cmt2, ins.name name, ins.type type
                    from diff.function_bblocks bb,
                         diff.functions f,
                         diff.bb_instructions bbi,
                         diff.instructions ins
                   where f.id = bb.function_id
                     and bbi.basic_block_id = bb.basic_block_id
                     and ins.id = bbi.instruction_id
                     and f.address = ?
                     and (ins.comment1 is not null
                       or ins.comment2 is not null
                       or ins.name is not null) """
        cur.execute(sql, (ea2,))
        import_rows = cur.fetchall()
        if len(import_rows) > 0:
            # Map disassembly text -> importable metadata for the diff side.
            import_syms = {}
            for row in import_rows:
                import_syms[row["dis"]] = [row["ea"], row["cmt1"], row["cmt2"], row["name"], row["type"]]
            # Check in the current database
            sql = """ select ins.address ea, ins.disasm dis, ins.comment1 cmt1, ins.comment2 cmt2, ins.name name, ins.type type
                        from function_bblocks bb,
                             functions f,
                             bb_instructions bbi,
                             instructions ins
                       where f.id = bb.function_id
                         and bbi.basic_block_id = bb.basic_block_id
                         and ins.id = bbi.instruction_id
                         and f.address = ?"""
            cur.execute(sql, (ea1,))
            match_rows = cur.fetchall()
            if len(match_rows) > 0:
                # Same map for the primary side.
                matched_syms = {}
                for row in match_rows:
                    matched_syms[row["dis"]] = [row["ea"], row["cmt1"], row["cmt2"], row["name"], row["type"]]
                # We have 'something' to import, let's diff the assembly...
                sql = """select *
                           from (
                         select assembly, 1
                           from functions
                          where address = ?
                            and assembly is not null
                   union select assembly, 2
                           from diff.functions
                          where address = ?
                            and assembly is not null)
                          order by 2 asc"""
                cur.execute(sql, (ea1, ea2))
                diff_rows = cur.fetchall()
                if len(diff_rows) > 0:
                    lines1 = diff_rows[0][0]
                    lines2 = diff_rows[1][0]
                    matches = {}
                    to_line = None
                    change_line = None
                    # ndiff marks removed lines with '-' and added ones with
                    # '+'; a '-' immediately matched by a '+' is treated as a
                    # changed line and both sides are looked up in the maps.
                    diff_list = difflib.ndiff(lines1.splitlines(1), lines2.splitlines(1))
                    for x in diff_list:
                        if x[0] == '-':
                            change_line = x[1:].strip(" ").strip("\r").strip("\n")
                        elif x[0] == '+':
                            to_line = x[1:].strip(" ").strip("\r").strip("\n")
                        elif change_line is not None:
                            # An unchanged line breaks the pending pairing.
                            change_line = None
                        if to_line is not None and change_line is not None:
                            matches[change_line] = to_line
                            if change_line in matched_syms and to_line in import_syms:
                                self.import_instruction(matched_syms[change_line], import_syms[to_line])
                            change_line = to_line = None
    finally:
        cur.close()
def do_import_one(self, ea1, ea2, force = False):
    """Import prototype, comment, name and flags for one matched function.

    *ea1*/*ea2* are the primary/diff addresses. Unless *force* is True,
    auto-generated ("sub_*") names coming from the diff database are not
    applied. Instruction level data is imported afterwards.
    """
    cur = self.db_cursor()
    sql = "select prototype, comment, mangled_function, function_flags from diff.functions where address = ?"
    cur.execute(sql, (ea2,))
    row = cur.fetchone()
    if row is not None:
        proto = row[0]
        comment = row[1]
        name = row[2]
        flags = row[3]
        ea1 = int(ea1)
        if not name.startswith("sub_") or force:
            # Try the name as-is; on collision fall back to name_0..name_9.
            if not MakeNameEx(ea1, name, SN_NOWARN|SN_NOCHECK):
                for i in xrange(10):
                    if MakeNameEx(ea1, "%s_%d" % (name, i), SN_NOWARN|SN_NOCHECK):
                        break
        # "int()" is the decompiler's placeholder prototype — skip it.
        if proto is not None and proto != "int()":
            SetType(ea1, proto)
        if comment is not None and comment != "":
            SetFunctionCmt(ea1, comment, 1)
        if flags is not None:
            SetFunctionFlags(ea1, flags)
        self.import_instruction_level(ea1, ea2, cur)
    cur.close()
def import_selected(self, items, selected):
    """Import only the chooser rows given by the 1-based indexes *selected*."""
    # Import all the type libraries from the diff database
    self.import_til()
    # Import all the struct and enum definitions
    self.import_definitions()
    # Chooser indexes are 1-based, hence the "- 1".
    self.import_items([items[idx - 1] for idx in selected])
def import_items(self, items):
    """Import names/comments/types for every match and refresh the database.

    Each *item* is a chooser row; item[1]/item[3] hold the primary and
    secondary addresses as hexadecimal strings. After importing, every
    touched function is re-read and its row replaced in the primary DB.
    """
    to_import = set()
    # Import all the function names and comments
    for item in items:
        ea1 = str(int(item[1], 16))
        ea2 = str(int(item[3], 16))
        self.do_import_one(ea1, ea2)
        to_import.add(ea1)
    try:
        show_wait_box("Updating primary database...")
        total = 0
        for ea in to_import:
            ea = str(ea)
            # Re-extract the now renamed/commented function and swap the
            # stale row for fresh data.
            new_func = self.read_function(ea)
            self.delete_function(ea)
            self.save_function(new_func)
            total += 1
        self.db.commit()
    finally:
        hide_wait_box()
def do_import_all(self, items):
    """Import type libraries, struct/enum definitions and all of *items*."""
    # Type libraries first, then definitions, then the matches themselves.
    self.import_til()
    self.import_definitions()
    self.import_items(items)
def do_import_all_auto(self, items):
    """Like do_import_all() but restricted to auto-named (sub_*) functions."""
    self.import_til()
    self.import_definitions()
    # Keep only rows whose primary name (column 2) is auto-generated.
    sub_items = [row for row in items if row[2].startswith("sub_")]
    self.import_items(sub_items)
def re_diff(self):
    """Close all result choosers and run the diffing process again.

    Asks whether only new matches should be shown; answering "No" clears
    the matched sets so every match is reported again. Cancel aborts.
    """
    self.best_chooser.Close()
    self.partial_chooser.Close()
    if self.unreliable_chooser is not None:
        self.unreliable_chooser.Close()
    if self.unmatched_primary is not None:
        self.unmatched_primary.Close()
    if self.unmatched_second is not None:
        self.unmatched_second.Close()
    ret = askyn_c(1, "Do you want to show only the new matches?")
    if ret == -1:
        # User cancelled: keep everything as is.
        return
    elif ret == 0:
        # Forget previous matches so they are rediscovered and shown.
        self.matched1 = set()
        self.matched2 = set()
    self.diff(self.last_diff_db)
def import_all(self, items):
    """Import every match in *items*, optionally relaunching the diff."""
    try:
        self.do_import_all(items)
        msg = "AUTOHIDE DATABASE\nHIDECANCEL\nAll functions were imported. Do you want to relaunch the diffing process?"
        if askyn_c(1, msg) == 1:
            self.db.execute("detach diff")
            # We cannot run that code here or otherwise IDA will crash corrupting the stack
            timeraction_t(self.re_diff, None, 1000)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate; real errors are logged with a traceback.
        log("import_all(): %s" % str(sys.exc_info()[1]))
        traceback.print_exc()
def import_all_auto(self, items):
    """Import matches for auto-named (sub_*) functions only."""
    try:
        self.do_import_all_auto(items)
    except Exception:
        # BUGFIX: the log line used to wrongly report "import_all()"; also
        # narrowed from a bare "except:".
        log("import_all_auto(): %s" % str(sys.exc_info()[1]))
        traceback.print_exc()
def equal_db(self):
    """Check whether the main and diff databases describe the same binary.

    Returns True when both program rows share the same MD5 or, failing
    that, when the functions tables contain exactly the same rows.

    BUGFIX: the original returned ``row[0] == 0`` on the MD5-join count row
    (which is 1 on a match), so two identical databases were reported as
    different right after logging "Same MD5 in both databases".
    The cursor is now also closed on exceptions.
    """
    cur = self.db_cursor()
    try:
        sql = "select count(*) from program p, diff.program dp where p.md5sum = dp.md5sum"
        cur.execute(sql)
        row = cur.fetchone()
        if row[0] == 1:
            log("Same MD5 in both databases")
            return True
        # Different binaries: compare the complete functions tables.
        sql = "select count(*) from (select * from functions except select * from diff.functions) x"
        cur.execute(sql)
        row = cur.fetchone()
        return row[0] == 0
    finally:
        cur.close()
def check_callgraph(self):
    """Compare the callgraph prime-product signatures of both programs.

    Equal products mean the programs are structurally identical (sets
    self.equal_callgraph); otherwise the percentage of differing factors
    is logged.
    """
    cur = self.db_cursor()
    sql = """select callgraph_primes, callgraph_all_primes from program
             union all
             select callgraph_primes, callgraph_all_primes from diff.program"""
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) == 2:
        # Prime products are huge: handled as Decimal; the factor maps are
        # stored as JSON {prime: count} dictionaries.
        cg1 = decimal.Decimal(rows[0][0])
        cg_factors1 = json.loads(rows[0][1])
        cg2 = decimal.Decimal(rows[1][0])
        cg_factors2 = json.loads(rows[1][1])
        if cg1 == cg2:
            self.equal_callgraph = True
            log("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
            Warning("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
        else:
            # Pre-seed the factorization cache so difference() does not need
            # to re-factorize the big products.
            FACTORS_CACHE[cg1] = cg_factors1
            FACTORS_CACHE[cg2] = cg_factors2
            diff = difference(cg1, cg2)
            total = sum(cg_factors1.values())
            percent = diff * 100. / total
            log("Callgraphs from both programs differ in %f%%" % percent)
    cur.close()
def find_equal_matches(self):
    """Run the cheap "(almost) perfect match" heuristics.

    Records the total function counts of both databases, then tries, in
    order: identical whole rows, same RVA+hash, same order+hash, function
    hash, bytes hash (+names), same names, bytes sum, equal assembly or
    pseudo-code, cleaned-up assembly/pseudo-code and same address/nodes/
    edges/mnemonics. Results go to the best/partial/unreliable choosers.
    """
    cur = self.db_cursor()
    # Start by calculating the total number of functions in both databases
    sql = """select count(*) total1 from functions
             union all
             select count(*) total2 from diff.functions"""
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) != 2:
        Warning("Malformed database, only %d rows!" % len(rows))
        raise Exception("Malformed database!")
    self.total_functions1 = rows[0][0]
    self.total_functions2 = rows[1][0]
    # Functions whose entire row is identical in both databases.
    sql = "select address, mangled_function from (select * from functions intersect select * from diff.functions) x"
    cur.execute(sql)
    rows = cur.fetchall()
    choose = self.best_chooser
    if len(rows) > 0:
        for row in rows:
            name = row[1]
            ea = LocByName(name)
            ea2 = row[0]
            choose.add_item(CChooser.Item(ea, name, ea2, name, "100% equal", 1))
            self.matched1.add(name)
            self.matched2.add(name)
    postfix = ""
    if self.ignore_small_functions:
        postfix = " and f.instructions > 5 and df.instructions > 5 "
    # NOTE(review): in the next two queries the trailing condition
    # "or substr(df.name, 1, 4)" looks like it is missing "= 'sub_'" —
    # confirm against upstream before changing the SQL.
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Same RVA and hash' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes,
                     f.function_hash, df.function_hash
                from functions f,
                     diff.functions df
               where df.rva = f.rva
                 and df.bytes_hash = f.bytes_hash
                 and df.instructions = f.instructions
                 and ((f.name = df.name and substr(f.name, 1, 4) != 'sub_')
                   or (substr(f.name, 1, 4) = 'sub_' or substr(df.name, 1, 4)))"""
    log_refresh("Finding with heuristic 'Same RVA and hash'")
    self.add_matches_from_query(sql, choose)
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Same order and hash' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes,
                     f.function_hash, df.function_hash
                from functions f,
                     diff.functions df
               where df.id = f.id
                 and df.bytes_hash = f.bytes_hash
                 and df.instructions = f.instructions
                 and ((f.name = df.name and substr(f.name, 1, 4) != 'sub_')
                   or (substr(f.name, 1, 4) = 'sub_' or substr(df.name, 1, 4)))"""
    log_refresh("Finding with heuristic 'Same order and hash'")
    self.add_matches_from_query(sql, choose)
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Function hash' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes,
                     f.function_hash, df.function_hash
                from functions f,
                     diff.functions df
               where f.function_hash = df.function_hash
                 and f.instructions > 5 and df.instructions > 5 """
    log_refresh("Finding with heuristic 'Function hash'")
    self.add_matches_from_query(sql, choose)
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Bytes hash and names' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.bytes_hash = df.bytes_hash
                 and f.names = df.names
                 and f.names != '[]'
                 and f.instructions > 5 and df.instructions > 5"""
    log_refresh("Finding with heuristic 'Bytes hash and names'")
    self.add_matches_from_query(sql, choose)
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Bytes hash' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.bytes_hash = df.bytes_hash
                 and f.instructions > 5 and df.instructions > 5"""
    log_refresh("Finding with heuristic 'Bytes hash'")
    self.add_matches_from_query(sql, choose)
    # Name based matching only makes sense when the callgraphs differ and
    # symbol names are trusted.
    if not self.equal_callgraph and not self.ignore_all_names:
        self.find_same_name(self.partial_chooser)
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Bytes sum' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.bytes_sum = df.bytes_sum
                 and f.size = df.size
                 and f.instructions > 5 and df.instructions > 5"""
    log_refresh("Finding with heuristic 'Bytes sum'")
    self.add_matches_from_query(sql, choose)
    sql = """select f.address, f.name, df.address, df.name, 'Equal pseudo-code' description
               from functions f,
                    diff.functions df
              where f.pseudocode = df.pseudocode
                and df.pseudocode is not null
                and f.pseudocode_lines >= 5 """ + postfix + """
              union
             select f.address, f.name, df.address, df.name, 'Equal assembly' description
               from functions f,
                    diff.functions df
              where f.assembly = df.assembly
                and df.assembly is not null
              """ + postfix
    log_refresh("Finding with heuristic 'Equal assembly or pseudo-code'")
    self.add_matches_from_query(sql, choose)
    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Same cleaned up assembly or pseudo-code' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.clean_assembly = df.clean_assembly
                  or f.clean_pseudo = df.clean_pseudo""" + postfix
    log_refresh("Finding with heuristic 'Same cleaned up assembly or pseudo-code'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
    sql = """select f.address, f.name, df.address, df.name, 'Same address, nodes, edges and mnemonics' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.rva = df.rva
                and f.instructions = df.instructions
                and f.nodes = df.nodes
                and f.edges = df.edges
                and f.mnemonics = df.mnemonics""" + postfix
    log_refresh("Finding with heuristic 'Same address, nodes, edges and mnemonics'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, None)
    cur.close()
def decompile_and_get(self, ea):
    """Decompile the function at *ea* with the Hex-Rays decompiler.

    On success stores the AST primes hash in self.pseudo_hash[ea] and the
    pseudo-code body lines in self.pseudo[ea], returning the first line
    (the prototype). Returns False when decompilation is not possible.
    """
    if not init_hexrays_plugin():
        return False
    f = get_func(ea)
    if f is None:
        return False
    cfunc = decompile(f);
    if cfunc is None:
        # Failed to decompile
        return False
    # Hash the decompiled AST with small primes (see CAstVisitor).
    visitor = CAstVisitor(cfunc)
    visitor.apply_to(cfunc.body, None)
    self.pseudo_hash[ea] = visitor.primes_hash
    sv = cfunc.get_pseudocode();
    self.pseudo[ea] = []
    first_line = None
    for sline in sv:
        line = tag_remove(sline.line);
        # Skip decompiler banner/comment lines.
        if line.startswith("//"):
            continue
        if first_line is None:
            first_line = line
        else:
            self.pseudo[ea].append(line)
    return first_line
def guess_type(self, ea):
    """Guess the type declaration of the item at *ea*.

    Returns IDA's GuessType() result, unless the decompiler is configured
    to always be used and manages to produce a prototype, in which case
    the decompiled prototype wins.
    """
    t = GuessType(ea)
    if not self.use_decompiler_always:
        return t
    try:
        ret = self.decompile_and_get(ea)
        if ret:
            t = ret
    except Exception:
        # Narrowed from a bare "except:": decompilation failures are
        # logged and the GuessType() result is used instead.
        log("Cannot decompile 0x%x: %s" % (ea, str(sys.exc_info()[1])))
    return t
def ast_ratio(self, ast1, ast2):
    """AST similarity ratio; always 0 unless relaxed comparisons are on."""
    if self.relaxed_ratio:
        return ast_ratio(ast1, ast2)
    return 0
def check_ratio(self, ast1, ast2, pseudo1, pseudo2, asm1, asm2):
    """Return the best similarity ratio (0.0..1.0) between two functions.

    Compares, in order, the AST primes (only for small ASTs), the cleaned
    pseudo-code and the cleaned assembly, returning the maximum ratio.
    In relaxed mode the faster-but-coarser real_quick_ratio is used and
    1.0 results are double-checked with quick_ratio.
    """
    fratio = quick_ratio
    decimal_values = "{0:.2f}"
    if self.relaxed_ratio:
        fratio = real_quick_ratio
        decimal_values = "{0:.1f}"
    v3 = 0
    ast_done = False
    # The AST shortcut is only tried for reasonably small ASTs.
    if self.relaxed_ratio and ast1 is not None and ast2 is not None and max(len(ast1), len(ast2)) < 16:
        ast_done = True
        v3 = self.ast_ratio(ast1, ast2)
        if v3 == 1:
            return 1.0
    v1 = 0
    if pseudo1 is not None and pseudo2 is not None and pseudo1 != "" and pseudo2 != "":
        tmp1 = self.get_cmp_pseudo_lines(pseudo1)
        tmp2 = self.get_cmp_pseudo_lines(pseudo2)
        if tmp1 == "" or tmp2 == "":
            log("Error cleaning pseudo-code!")
        else:
            v1 = fratio(tmp1, tmp2)
            v1 = float(decimal_values.format(v1))
            if v1 == 1.0:
                # If real_quick_ratio returns 1 try again with quick_ratio
                # because it can result in false positives. If real_quick_ratio
                # says 'different', there is no point in continuing.
                if fratio == real_quick_ratio:
                    v1 = quick_ratio(tmp1, tmp2)
                    if v1 == 1.0:
                        return 1.0
    tmp_asm1 = self.get_cmp_asm_lines(asm1)
    tmp_asm2 = self.get_cmp_asm_lines(asm2)
    v2 = fratio(tmp_asm1, tmp_asm2)
    v2 = float(decimal_values.format(v2))
    if v2 == 1:
        # Actually, same as the quick_ratio/real_quick_ratio check done
        # with the pseudo-code
        if fratio == real_quick_ratio:
            v2 = quick_ratio(tmp_asm1, tmp_asm2)
            if v2 == 1.0:
                return 1.0
    if self.relaxed_ratio and not ast_done:
        # BUGFIX: guard against None ASTs before handing them to fratio;
        # large ASTs skip the shortcut above and may be missing entirely.
        if ast1 is not None and ast2 is not None:
            v3 = fratio(ast1, ast2)
            v3 = float(decimal_values.format(v3))
            if v3 == 1:
                return 1.0
    r = max(v1, v2, v3)
    return r
def all_functions_matched(self):
    """True once every function of either database has been matched."""
    matched_all_primary = len(self.matched1) == self.total_functions1
    matched_all_secondary = len(self.matched2) == self.total_functions2
    return matched_all_primary or matched_all_secondary
def add_matches_from_query_ratio(self, sql, best, partial, unreliable=None):
    """Run *sql* and classify every candidate match by similarity ratio.

    Rows must yield (ea, name1, ea2, name2, desc, pseudo1, pseudo2, asm1,
    asm2, ast1, ast2). Ratio 1.0 goes to self.best_chooser, >= 0.5 to
    *partial*, and the rest to *unreliable* when given (else *partial*).
    Processing stops after self.max_processed_rows rows or self.timeout
    seconds.
    """
    if self.all_functions_matched():
        return
    cur = self.db_cursor()
    try:
        cur.execute(sql)
    except Exception:
        # BUGFIX: close the cursor instead of leaking it on SQL errors;
        # also narrowed from a bare "except:".
        log("Error: %s" % str(sys.exc_info()[1]))
        cur.close()
        return
    i = 0
    t = time.time()
    while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
        if time.time() - t > self.timeout:
            log("Timeout")
            break
        i += 1
        if i % 50000 == 0:
            log("Processed %d rows..." % i)
        row = cur.fetchone()
        if row is None:
            break
        ea = str(row[0])
        name1 = row[1]
        ea2 = row[2]
        name2 = row[3]
        desc = row[4]
        pseudo1 = row[5]
        pseudo2 = row[6]
        asm1 = row[7]
        asm2 = row[8]
        ast1 = row[9]
        ast2 = row[10]
        # Never re-match something already found by a better heuristic.
        if name1 in self.matched1 or name2 in self.matched2:
            continue
        r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
        if r == 1:
            self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif r >= 0.5:
            partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif r < 0.5 and unreliable is not None:
            # BUGFIX: this condition was "r < 5", which is always true for a
            # 0..1 ratio; the branch choice effectively depends only on
            # *unreliable* being provided. Behavior is unchanged.
            unreliable.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        else:
            partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
    cur.close()
def add_matches_from_query_ratio_max(self, sql, best, partial, val):
    """Like add_matches_from_query_ratio() but with threshold *val*.

    Ratio 1.0 rows go to self.best_chooser, ratios above *val* to *best*
    and the remaining rows to *partial* (when not None). Processing stops
    after self.max_processed_rows rows or self.timeout seconds.
    """
    if self.all_functions_matched():
        return
    cur = self.db_cursor()
    try:
        cur.execute(sql)
    except Exception:
        # BUGFIX: close the cursor instead of leaking it on SQL errors;
        # also narrowed from a bare "except:".
        log("Error: %s" % str(sys.exc_info()[1]))
        cur.close()
        return
    i = 0
    t = time.time()
    while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
        if time.time() - t > self.timeout:
            log("Timeout")
            break
        i += 1
        if i % 50000 == 0:
            log("Processed %d rows..." % i)
        row = cur.fetchone()
        if row is None:
            break
        ea = str(row[0])
        name1 = row[1]
        ea2 = row[2]
        name2 = row[3]
        desc = row[4]
        pseudo1 = row[5]
        pseudo2 = row[6]
        asm1 = row[7]
        asm2 = row[8]
        ast1 = row[9]
        ast2 = row[10]
        # Never re-match something already found by a better heuristic.
        if name1 in self.matched1 or name2 in self.matched2:
            continue
        r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
        if r == 1:
            self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif r > val:
            best.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif partial is not None:
            partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
    cur.close()
def add_matches_from_query(self, sql, choose):
    """Add every row returned by *sql* to *choose* with ratio 1.0.

    Warning: use this *only* if the ratio is known to be 1.00.
    Rows must yield (ea, name1, ea2, name2, description).
    """
    if self.all_functions_matched():
        return
    cur = self.db_cursor()
    try:
        cur.execute(sql)
    except Exception:
        # BUGFIX: close the cursor instead of leaking it on SQL errors;
        # also narrowed from a bare "except:".
        log("Error: %s" % str(sys.exc_info()[1]))
        cur.close()
        return
    i = 0
    while 1:
        i += 1
        if i % 1000 == 0:
            log("Processed %d rows..." % i)
        row = cur.fetchone()
        if row is None:
            break
        ea = str(row[0])
        name1 = row[1]
        ea2 = row[2]
        name2 = row[3]
        desc = row[4]
        # Never re-match something already found by a better heuristic.
        if name1 in self.matched1 or name2 in self.matched2:
            continue
        choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1))
        self.matched1.add(name1)
        self.matched2.add(name2)
    cur.close()
def search_small_differences(self, choose):
    """Find functions equal in structure but with small symbol differences.

    Candidates share nodes, edges, mnemonics and cyclomatic complexity;
    the ratio is the fraction of shared callee/symbol names. Ratio 1.0
    goes to the best chooser, the rest (>= 0.5) to *choose*.
    """
    cur = self.db_cursor()
    # Same basic blocks, edges, mnemonics, etc... but different names
    sql = """ select distinct f.address ea, f.name name1, df.name name2,
                     f.names, df.names, df.address ea2
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.mnemonics = df.mnemonics
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.names != '[]'"""
    cur.execute(sql)
    rows = cur.fetchall()
    for row in rows:
        ea = str(row[0])
        name1 = row[1]
        name2 = row[2]
        if name1 in self.matched1 or name2 in self.matched2:
            continue
        # The names columns hold JSON lists of symbol names.
        s1 = set(json.loads(row[3]))
        s2 = set(json.loads(row[4]))
        total = max(len(s1), len(s2))
        commons = len(s1.intersection(s2))
        ratio = (commons * 1.) / total
        if ratio >= 0.5:
            ea2 = row[5]
            item = CChooser.Item(ea, name1, ea2, name2, "Nodes, edges, complexity and mnemonics with small differences", ratio)
            if ratio == 1.0:
                self.best_chooser.add_item(item)
            else:
                choose.add_item(item)
            self.matched1.add(name1)
            self.matched2.add(name2)
    cur.close()
    return
def find_same_name(self, choose):
    """Match functions that share the same (mangled) name in both databases.

    Ratio 1.0 matches go to the best chooser, others to *choose*.
    NOTE(review): both branches use the description "Perfect match, same
    name" even when the ratio is below 1.0 — confirm this is intended.
    """
    cur = self.db_cursor()
    sql = """select f.address, f.mangled_function, d.address, f.name, d.name, d.mangled_function,
                    f.pseudocode, d.pseudocode,
                    f.assembly, d.assembly,
                    f.pseudocode_primes, d.pseudocode_primes
               from functions f,
                    diff.functions d
              where d.mangled_function = f.mangled_function
                 or d.name = f.name"""
    log_refresh("Finding with heuristic 'Same name'")
    cur.execute(sql)
    rows = cur.fetchall()
    cur.close()
    if len(rows) > 0 and not self.all_functions_matched():
        for row in rows:
            ea = row[0]
            name = row[1]
            ea2 = row[2]
            name1 = row[3]
            name2 = row[4]
            name2_1 = row[5]
            # Skip anything already matched under any of its names.
            if name in self.matched1 or name1 in self.matched1 or \
               name2 in self.matched2 or name2_1 in self.matched2:
                continue
            if self.ignore_sub_names and name.startswith("sub_"):
                continue
            ast1 = row[10]
            ast2 = row[11]
            pseudo1 = row[6]
            pseudo2 = row[7]
            asm1 = row[8]
            asm2 = row[9]
            ratio = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
            if float(ratio) == 1.0:
                self.best_chooser.add_item(CChooser.Item(ea, name, ea2, name, "Perfect match, same name", 1))
            else:
                choose.add_item(CChooser.Item(ea, name, ea2, name, "Perfect match, same name", ratio))
            self.matched1.add(name)
            self.matched1.add(name1)
            self.matched2.add(name2)
            self.matched2.add(name2_1)
def get_function_id(self, name, primary=True):
    """Return the id of the function called *name*, or None when unknown.

    Looks in the "main" schema when *primary* is True, else in "diff".
    """
    db_name = "main" if primary else "diff"
    cur = self.db_cursor()
    try:
        cur.execute("select id from %s.functions where name = ?" % db_name, (name,))
        row = cur.fetchone()
        return row[0] if row else None
    finally:
        cur.close()
def find_matches_in_hole(self, last, item, row):
    """Search new matches inside an id "hole" around a known match.

    *last* is the last consecutive matched id, *item* the chooser row of
    the surrounding match (item[5] holds its ratio) and *row* supplies the
    function ids id1/id2. Probes up to 10x10 id combinations, accepting
    candidates whose ratio beats min(0.6, surrounding ratio).
    NOTE(review): the *done* flag is checked but never set True in this
    code — confirm whether the early-exit was lost upstream.
    """
    cur = self.db_cursor()
    try:
        postfix = ""
        if self.ignore_small_functions:
            postfix = " and instructions > 5"
        desc = "Call address sequence"
        id1 = row["id1"]
        id2 = row["id2"]
        sql = """ select * from functions where id = ? """ + postfix + """
                  union all
                  select * from diff.functions where id = ? """ + postfix
        thresold = min(0.6, float(item[5]))
        done = False
        for j in range(0, min(10, id1 - last)):
            if done:
                break
            for i in range(0, min(10, id1 - last)):
                if done:
                    break
                cur.execute(sql, (id1+j, id2+i))
                rows = cur.fetchall()
                # Exactly one row from each database means both ids exist.
                if len(rows) == 2:
                    name1 = rows[0]["name"]
                    name2 = rows[1]["name"]
                    if name1 in self.matched1 or name2 in self.matched2:
                        continue
                    r = self.check_ratio(rows[0]["pseudocode_primes"], rows[1]["pseudocode_primes"], \
                                         rows[0]["pseudocode"], rows[1]["pseudocode"], \
                                         rows[0]["assembly"], rows[1]["assembly"])
                    if r < 0.5:
                        # Identical non-empty callee name lists bump the
                        # ratio just above the 0.5 partial threshold.
                        if rows[0]["names"] != "[]" and rows[0]["names"] == rows[1]["names"]:
                            r = 0.5001
                    if r > thresold:
                        ea = rows[0]["address"]
                        ea2 = rows[1]["address"]
                        if r == 1:
                            self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
                            self.matched1.add(name1)
                            self.matched2.add(name2)
                        elif r > 0.5:
                            self.partial_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
                            self.matched1.add(name1)
                            self.matched2.add(name2)
                        else:
                            self.unreliable_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
                            self.matched1.add(name1)
                            self.matched2.add(name2)
    finally:
        cur.close()
def find_from_matches(self, the_items):
    """Discover new matches from already accepted ones (id ordering).

    Known matches are copied into a temporary table ordered by primary
    function id; whenever the id sequence has a gap ("hole"), the gap is
    probed with find_matches_in_hole().
    """
    # XXX: FIXME: This is wrong in many ways, but still works... FIX IT!
    # Rule 1: if a function A in program P is has id X and function B in
    # the same program is has id + 1, then, in program P2, function B
    # maybe the next function to A in P2.
    log_refresh("Finding with heuristic 'Call address sequence'")
    cur = self.db_cursor()
    try:
        # Create a copy of all the functions
        cur.execute("create temporary table best_matches (id, id1, ea1, name1, id2, ea2, name2)")
        # Insert each matched function into the temporary table
        i = 0
        for match in the_items:
            ea1 = match[1]
            name1 = match[2]
            ea2 = match[3]
            name2 = match[4]
            id1 = self.get_function_id(name1)
            id2 = self.get_function_id(name2, False)
            sql = """insert into best_matches (id, id1, ea1, name1, id2, ea2, name2)
                     values (?, ?, ?, ?, ?, ?, ?)"""
            cur.execute(sql, (i, id1, ea1, name1, id2, ea2, name2))
            i += 1
        last = None
        cur.execute("select * from best_matches order by id1 asc")
        for row in cur:
            row_id = row["id1"]
            # Consecutive ids: no hole to inspect, just advance.
            if last is None or last+1 == row_id:
                last = row_id
                continue
            item = the_items[row["id"]]
            self.find_matches_in_hole(last, item, row)
            last = row_id
        cur.execute("drop table best_matches")
    finally:
        cur.close()
    # Rule 2: given a match for a function F in programs P & P2, find
    # parents and children of the matched function using the parents and
    # children of program P.
    # TODO: Implement it.
    pass
def find_matches(self):
choose = self.partial_chooser
postfix = ""
if self.ignore_small_functions:
postfix = " and f.instructions > 5 and df.instructions > 5 "
sql = """select f.address, f.name, df.address, df.name,
'All attributes' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.indegree = df.indegree
and f.outdegree = df.outdegree
and f.size = df.size
and f.instructions = df.instructions
and f.mnemonics = df.mnemonics
and f.names = df.names
and f.prototype2 = df.prototype2
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.primes_value = df.primes_value
and f.bytes_hash = df.bytes_hash
and f.pseudocode_hash1 = df.pseudocode_hash1
and f.pseudocode_primes = df.pseudocode_primes
and f.pseudocode_hash2 = df.pseudocode_hash2
and f.pseudocode_hash3 = df.pseudocode_hash3
and f.strongly_connected = df.strongly_connected
and f.loops = df.loops
and f.tarjan_topological_sort = df.tarjan_topological_sort
and f.strongly_connected_spp = df.strongly_connected_spp """ + postfix + """
union
select f.address, f.name, df.address, df.name,
'Most attributes' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.indegree = df.indegree
and f.outdegree = df.outdegree
and f.size = df.size
and f.instructions = df.instructions
and f.mnemonics = df.mnemonics
and f.names = df.names
and f.prototype2 = df.prototype2
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.primes_value = df.primes_value
and f.bytes_hash = df.bytes_hash
and f.strongly_connected = df.strongly_connected
and f.loops = df.loops
and f.tarjan_topological_sort = df.tarjan_topological_sort
and f.strongly_connected_spp = df.strongly_connected_spp """
sql += postfix
log_refresh("Finding with heuristic 'All or most attributes'")
self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)
sql = """select f.address, f.name, df.address, df.name, 'Switch structures' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.switches = df.switches
and df.switches != '[]' """ + postfix
log_refresh("Finding with heuristic 'Switch structures'")
self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.2)
sql = """select f.address, f.name, df.address, df.name,
'Same address, nodes, edges and primes (re-ordered instructions)' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.rva = df.rva
and f.instructions = df.instructions
and f.nodes = df.nodes
and f.edges = df.edges
and f.primes_value = df.primes_value
and f.nodes > 3""" + postfix
log_refresh("Finding with heuristic 'Same address, nodes, edges and primes (re-ordered instructions)'")
self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.5)
sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
'Import names hash',
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.names = df.names
and f.names != '[]'
and f.nodes = df.nodes
and f.edges = df.edges
and f.instructions = df.instructions""" + postfix
log_refresh("Finding with heuristic 'Import names hash'")
self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)
sql = """ select f.address ea, f.name name1, df.address ea2, df.name name2,
'Nodes, edges, complexity, mnemonics, names, prototype2, in-degree and out-degree',
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.mnemonics = df.mnemonics
and f.names = df.names
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.prototype2 = df.prototype2
and f.indegree = df.indegree
and f.outdegree = df.outdegree
and f.nodes > 3
and f.edges > 3
and f.names != '[]'""" + postfix + """
union
select f.address ea, f.name name1, df.address ea2, df.name name2,
'Nodes, edges, complexity, mnemonics, names and prototype2' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.mnemonics = df.mnemonics
and f.names = df.names
and f.names != '[]'
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.prototype2 = df.prototype2""" + postfix
log_refresh("Finding with heuristic 'Nodes, edges, complexity, mnemonics, names, prototype, in-degree and out-degree'")
self.add_matches_from_query_ratio(sql, self.partial_chooser, self.partial_chooser)
sql = """ select f.address ea, f.name name1, df.address ea2, df.name name2,
'Mnemonics and names' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.mnemonics = df.mnemonics
and f.instructions = df.instructions
and f.names = df.names
and f.names != '[]'""" + postfix
log_refresh("Finding with heuristic 'Mnemonics and names'")
self.add_matches_from_query_ratio(sql, choose, choose)
sql = """ select f.address ea, f.name name1, df.address ea2, df.name name2,
'Mnemonics small-primes-product' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.mnemonics_spp = df.mnemonics_spp
and f.instructions = df.instructions
and df.instructions > 5"""
log_refresh("Finding with heuristic 'Mnemonics small-primes-product'")
self.add_matches_from_query_ratio(sql, choose, choose)
# Search using some of the previous criterias but calculating the
# edit distance
log_refresh("Finding with heuristic 'Small names difference'")
self.search_small_differences(choose)
if self.slow_heuristics:
sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy hash' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where df.pseudocode_hash1 = f.pseudocode_hash1
or df.pseudocode_hash2 = f.pseudocode_hash2
or df.pseudocode_hash3 = f.pseudocode_hash3""" + postfix
log_refresh("Finding with heuristic 'Pseudo-code fuzzy hashes'")
self.add_matches_from_query_ratio(sql, self.best_chooser, choose)
else:
sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy hash' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where df.pseudocode_hash1 = f.pseudocode_hash1""" + postfix
log_refresh("Finding with heuristic 'Pseudo-code fuzzy hash'")
self.add_matches_from_query_ratio(sql, self.best_chooser, choose)
sql = """select distinct f.address, f.name, df.address, df.name, 'Similar pseudo-code and names' description,
f.pseudocode, df.pseudocode,
f.pseudocode, df.pseudocode,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.pseudocode_lines = df.pseudocode_lines
and f.names = df.names
and df.names != '[]'
and df.pseudocode_lines > 5
and df.pseudocode is not null
and f.pseudocode is not null""" + postfix
log_refresh("Finding with heuristic 'Similar pseudo-code and names'")
self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
if self.slow_heuristics:
sql = """select distinct f.address, f.name, df.address, df.name, 'Similar pseudo-code' description,
f.pseudocode, df.pseudocode,
f.pseudocode, df.pseudocode,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.pseudocode_lines = df.pseudocode_lines
and df.pseudocode_lines > 5
and df.pseudocode is not null
and f.pseudocode is not null""" + postfix
log_refresh("Finding with heuristic 'Similar pseudo-code'")
self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.6)
sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy AST hash' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where df.pseudocode_primes = f.pseudocode_primes
and f.pseudocode_lines > 3
and length(f.pseudocode_primes) >= 35""" + postfix
log_refresh("Finding with heuristic 'Pseudo-code fuzzy AST hash'")
self.add_matches_from_query_ratio(sql, self.best_chooser, choose)
if self.slow_heuristics:
sql = """ select distinct f.address, f.name, df.address, df.name, 'Partial pseudo-code fuzzy hash' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where substr(df.pseudocode_hash1, 1, 16) = substr(f.pseudocode_hash1, 1, 16)
or substr(df.pseudocode_hash2, 1, 16) = substr(f.pseudocode_hash2, 1, 16)
or substr(df.pseudocode_hash3, 1, 16) = substr(f.pseudocode_hash3, 1, 16)""" + postfix
log_refresh("Finding with heuristic 'Partial pseudo-code fuzzy hash'")
self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.5)
sql = """select f.address, f.name, df.address, df.name,
'Topological sort hash' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.strongly_connected = df.strongly_connected
and f.tarjan_topological_sort = df.tarjan_topological_sort
and f.strongly_connected > 3""" + postfix
log_refresh("Finding with heuristic 'Topological sort hash'")
self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
sql = """ select f.address, f.name, df.address, df.name, 'Same high complexity, prototype and names' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.names = df.names
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.cyclomatic_complexity >= 20
and f.prototype2 = df.prototype2
and df.names != '[]'""" + postfix
log_refresh("Finding with heuristic 'Same high complexity, prototype and names'")
self.add_matches_from_query_ratio(sql, choose, choose)
sql = """ select f.address, f.name, df.address, df.name, 'Same high complexity and names' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.names = df.names
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.cyclomatic_complexity >= 15
and df.names != '[]'""" + postfix
log_refresh("Finding with heuristic 'Same high complexity and names'")
self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.5)
if self.slow_heuristics:
sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.strongly_connected = df.strongly_connected
and df.strongly_connected > 1
and f.nodes > 5 and df.nodes > 5
and f.strongly_connected_spp > 1
and df.strongly_connected_spp > 1""" + postfix
log_refresh("Finding with heuristic 'Strongly connected components'")
self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.80)
sql = """ select f.address, f.name, df.address, df.name, 'Strongly connected components small-primes-product' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.strongly_connected_spp = df.strongly_connected_spp
and df.strongly_connected_spp > 1""" + postfix
log_refresh("Finding with heuristic 'Strongly connected components small-primes-product'")
self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
if self.slow_heuristics:
sql = """select f.address, f.name, df.address, df.name, 'Loop count' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.loops = df.loops
and df.loops > 1
and f.nodes > 3 and df.nodes > 3""" + postfix
log_refresh("Finding with heuristic 'Loop count'")
self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.49)
sql = """ select f.address, f.name, df.address, df.name, 'Same names and order' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.names = df.names
and df.names != '[]'""" + postfix
log_refresh("Finding with heuristic 'Same names and order'")
self.add_matches_from_query_ratio(sql, choose, choose)
sql = """select f.address, f.name, df.address, df.name,
'Same nodes, edges and strongly connected components' description,
f.pseudocode, df.pseudocode,
f.assembly, df.assembly,
f.pseudocode_primes, df.pseudocode_primes
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.strongly_connected = df.strongly_connected
and df.nodes > 4""" + postfix
log_refresh("Finding with heuristic 'Same nodes, edges and strongly connected components'")
self.add_matches_from_query_ratio(sql, self.best_chooser, choose, self.unreliable_chooser)
def find_experimental_matches(self):
  """Run the experimental set of matching heuristics.

  These heuristics are less trusted than the ones in find_matches():
  depending on the heuristic, results go to the partial or to the
  unreliable chooser. Must run after find_matches(), because the call
  address sequence heuristic reuses the matches already found.
  """
  choose = self.unreliable_chooser

  # Call address sequence heuristic: use the already found matches to
  # try to match their neighbour functions.
  self.find_from_matches(self.best_chooser.items)
  self.find_from_matches(self.partial_chooser.items)

  postfix = ""
  if self.ignore_small_functions:
    postfix = " and f.instructions > 5 and df.instructions > 5 "

  if self.slow_heuristics:
    sql = """select distinct f.address, f.name, df.address, df.name,
                    'Similar small pseudo-code' description,
                    f.pseudocode, df.pseudocode,
                    f.pseudocode, df.pseudocode,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.pseudocode_lines = df.pseudocode_lines
                and df.pseudocode_lines <= 5
                and df.pseudocode is not null
                and f.pseudocode is not null""" + postfix
    log_refresh("Finding with heuristic 'Similar small pseudo-code'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.49)

  sql = """select distinct f.address, f.name, df.address, df.name,
                  'Small pseudo-code fuzzy AST hash' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where df.pseudocode_primes = f.pseudocode_primes
              and f.pseudocode_lines <= 5""" + postfix
  log_refresh("Finding with heuristic 'Small pseudo-code fuzzy AST hash'")
  self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

  sql = """select f.address, f.name, df.address, df.name,
                  'Equal small pseudo-code' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.pseudocode = df.pseudocode
              and df.pseudocode is not null
              and f.pseudocode_lines < 5""" + postfix
  log_refresh("Finding with heuristic 'Equal small pseudo-code'")
  self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

  # BUGFIX: the description column used to read 'Same high complexity,
  # prototype and names' (copy-pasted from find_matches) although this
  # query matches *low* complexity functions (< 20), as the log message
  # below correctly states.
  sql = """select f.address, f.name, df.address, df.name,
                  'Same low complexity, prototype and names' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.names = df.names
              and f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.cyclomatic_complexity < 20
              and f.prototype2 = df.prototype2
              and df.names != '[]'""" + postfix
  log_refresh("Finding with heuristic 'Same low complexity, prototype and names'")
  self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.5)

  sql = """select f.address, f.name, df.address, df.name,
                  'Same low complexity and names' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.names = df.names
              and f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.cyclomatic_complexity < 15
              and df.names != '[]'""" + postfix
  log_refresh("Finding with heuristic 'Same low complexity and names'")
  self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.5)

  if self.slow_heuristics:
    # For large databases (>25k functions) it may cause, for a reason,
    # the following error: OperationalError: database or disk is full
    sql = """select f.address, f.name, df.address, df.name,
                    'Same graph' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.strongly_connected = df.strongly_connected
                and f.loops = df.loops
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected_spp = df.strongly_connected_spp""" + postfix + """
              order by
                case when f.size = df.size then 1 else 0 end +
                case when f.instructions = df.instructions then 1 else 0 end +
                case when f.mnemonics = df.mnemonics then 1 else 0 end +
                case when f.names = df.names then 1 else 0 end +
                case when f.prototype2 = df.prototype2 then 1 else 0 end +
                case when f.primes_value = df.primes_value then 1 else 0 end +
                case when f.bytes_hash = df.bytes_hash then 1 else 0 end +
                case when f.pseudocode_hash1 = df.pseudocode_hash1 then 1 else 0 end +
                case when f.pseudocode_primes = df.pseudocode_primes then 1 else 0 end +
                case when f.pseudocode_hash2 = df.pseudocode_hash2 then 1 else 0 end +
                case when f.pseudocode_hash3 = df.pseudocode_hash3 then 1 else 0 end DESC"""
    log_refresh("Finding with heuristic 'Same graph'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
def find_unreliable_matches(self):
  """Find matches using heuristics considered unreliable.

  Only run when the user enables the 'unreliable' option. These
  heuristics rely almost purely on structural properties (graph shape,
  cyclomatic complexity, loop counts), so they are prone to false
  positives; most results land in the partial or unreliable choosers.
  """
  choose = self.unreliable_chooser

  postfix = ""
  if self.ignore_small_functions:
    # Ignore tiny functions: they match almost anything by accident.
    postfix = " and f.instructions > 5 and df.instructions > 5 "

  if self.slow_heuristics:
    sql = """select f.address, f.name, df.address, df.name,
                    'Strongly connected components' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.strongly_connected = df.strongly_connected
                and df.strongly_connected > 2""" + postfix
    log_refresh("Finding with heuristic 'Strongly connected components'")
    # 0.54 is the minimum similarity ratio accepted for this heuristic.
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.54)

    sql = """select f.address, f.name, df.address, df.name,
                    'Loop count' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.loops = df.loops
                and df.loops > 1""" + postfix
    log_refresh("Finding with heuristic 'Loop count'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

  sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                  'Nodes, edges, complexity and mnemonics' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.nodes = df.nodes
              and f.edges = df.edges
              and f.mnemonics = df.mnemonics
              and f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.nodes > 1 and f.edges > 0""" + postfix
  log_refresh("Finding with heuristic 'Nodes, edges, complexity and mnemonics'")
  self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

  sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                  'Nodes, edges, complexity and prototype' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.nodes = df.nodes
              and f.edges = df.edges
              and f.prototype2 = df.prototype2
              and f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.prototype2 != 'int()'""" + postfix
  log_refresh("Finding with heuristic 'Nodes, edges, complexity and prototype'")
  self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

  sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                  'Nodes, edges, complexity, in-degree and out-degree' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.nodes = df.nodes
              and f.edges = df.edges
              and f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.nodes > 3 and f.edges > 2
              and f.indegree = df.indegree
              and f.outdegree = df.outdegree""" + postfix
  log_refresh("Finding with heuristic 'Nodes, edges, complexity, in-degree and out-degree'")
  self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

  sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                  'Nodes, edges and complexity' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.nodes = df.nodes
              and f.edges = df.edges
              and f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.nodes > 1 and f.edges > 0""" + postfix
  log_refresh("Finding with heuristic 'Nodes, edges and complexity'")
  self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

  # NOTE(review): the description says 'Similar small pseudo-code' but
  # the filter requires *more* than 5 pseudo-code lines — looks like a
  # copy-paste from the experimental heuristics; confirm intent.
  sql = """select f.address, f.name, df.address, df.name,
                  'Similar small pseudo-code' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where df.pseudocode is not null
              and f.pseudocode is not null
              and f.pseudocode_lines = df.pseudocode_lines
              and df.pseudocode_lines > 5""" + postfix
  log_refresh("Finding with heuristic 'Similar small pseudo-code'")
  self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.5)

  sql = """select f.address, f.name, df.address, df.name,
                  'Same high complexity' description,
                  f.pseudocode, df.pseudocode,
                  f.assembly, df.assembly,
                  f.pseudocode_primes, df.pseudocode_primes
             from functions f,
                  diff.functions df
            where f.cyclomatic_complexity = df.cyclomatic_complexity
              and f.cyclomatic_complexity >= 50""" + postfix
  log_refresh("Finding with heuristic 'Same high complexity'")
  self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)
def find_unmatched(self):
  """Build the 'Unmatched in secondary'/'Unmatched in primary' choosers.

  A function is considered unmatched when its (possibly demangled) name
  was never recorded in self.matched1/self.matched2 by the matching
  heuristics run before this method.
  """
  cur = self.db_cursor()
  try:
    sql = "select name from functions"
    cur.execute(sql)
    rows = cur.fetchall()
    if rows:
      choose = CChooser("Unmatched in secondary", self, False)
      for row in rows:
        name = row[0]

        demangled_name = Demangle(str(name), INF_SHORT_DN)
        if demangled_name is not None:
          name = demangled_name

        if name not in self.matched1:
          # The primary database is the currently open IDB, so resolve
          # the function's address by name.
          ea = LocByName(str(name))
          choose.add_item(CChooser.Item(ea, name))
      self.unmatched_second = choose

    sql = "select name, address from diff.functions"
    cur.execute(sql)
    rows = cur.fetchall()
    if rows:
      choose = CChooser("Unmatched in primary", self, False)
      for row in rows:
        name = row[0]

        demangled_name = Demangle(str(name), INF_SHORT_DN)
        if demangled_name is not None:
          name = demangled_name

        if name not in self.matched2:
          # The address comes straight from the exported database.
          ea = row[1]
          choose.add_item(CChooser.Item(ea, name))
      self.unmatched_primary = choose
  finally:
    # BUGFIX: the cursor used to leak if any of the queries raised;
    # close it on every path now.
    cur.close()
def create_choosers(self):
  """(Re)create the chooser lists that will hold the diffing results."""
  # Match lists, from most to least trustworthy.
  self.best_chooser = CChooser("Best matches", self)
  self.partial_chooser = CChooser("Partial matches", self)
  self.unreliable_chooser = CChooser("Unreliable matches", self)
  # Unmatched-function lists; the trailing False marks them as plain
  # listings (same flag the original constructor calls used).
  self.unmatched_primary = CChooser("Unmatched in primary", self, False)
  self.unmatched_second = CChooser("Unmatched in secondary", self, False)
def show_choosers(self, force=False):
  """Display every result chooser that holds at least one item."""
  # The two main match lists are expected to exist at this point.
  if self.best_chooser.items:
    self.best_chooser.show(force)
  if self.partial_chooser.items:
    self.partial_chooser.show(force)

  # The remaining lists may not have been created, so guard for None.
  for optional in (self.unreliable_chooser, self.unmatched_primary,
                   self.unmatched_second):
    if optional is not None and optional.items:
      optional.show(force)
def register_menu(self):
  """Register the Diaphora entries under Edit -> Plugins in IDA.

  Also stores this instance in the module-level global so the menu
  callbacks (show_choosers/save_results/load_results) can reach it.
  """
  global g_bindiff
  g_bindiff = self

  # F3 re-opens the result tabs; save/load have no hotkey.
  idaapi.add_menu_item("Edit/Plugins/", "Diaphora - Show results", "F3", 0, show_choosers, ())
  idaapi.add_menu_item("Edit/Plugins/", "Diaphora - Save results", None, 0, save_results, ())
  idaapi.add_menu_item("Edit/Plugins/", "Diaphora - Load results", None, 0, load_results, ())
  # AUTOHIDE REGISTRY lets IDA remember a "don't show again" choice.
  Warning("""AUTOHIDE REGISTRY\nIf you close one tab you can always re-open it by pressing F3
or selecting Edit -> Plugins -> Diaphora - Show results""")
def diff(self, db):
  """Diff the current exported database against the database file *db*.

  Attaches *db* as the SQLite schema 'diff', validates it, runs every
  enabled set of matching heuristics and finally shows the result
  choosers.

  Returns:
    True when the diffing process ran; False when *db* is not a valid
    or version-compatible exported database.
  """
  self.last_diff_db = db
  cur = self.db_cursor()
  cur.execute('attach "%s" as diff' % db)
  try:
    try:
      cur.execute("select value from diff.version")
    except Exception:
      # Narrowed from a bare except so Ctrl-C still works; any SQLite
      # error here means the file is not a Diaphora export.
      log("Error: %s " % sys.exc_info()[1])
      Warning("The selected file does not look like a valid SQLite exported database!")
      return False

    row = cur.fetchone()
    if not row:
      Warning("Invalid database!")
      return False

    if row[0] != VERSION_VALUE:
      Warning("The database is from a different version (current %s, database %s)!" % (VERSION_VALUE, row[0]))
      return False

    # Create the choosers
    self.create_choosers()

    try:
      log_refresh("Performing diffing...", True)

      do_continue = True
      if self.equal_db():
        log("The databases seems to be 100% equal")
        if askyn_c(0, "HIDECANCEL\nThe databases seems to be 100% equal. Do you want to continue anyway?") != 1:
          do_continue = False

      if do_continue:
        # Compare the call graphs
        self.check_callgraph()

        # Find the unmodified functions
        log_refresh("Finding best matches...")
        self.find_equal_matches()

        # Find the modified functions
        log_refresh("Finding partial matches")
        self.find_matches()

        if self.unreliable:
          # Find using likely unreliable methods modified functions
          log_refresh("Finding probably unreliable matches")
          self.find_unreliable_matches()

        if self.experimental:
          # Find using experimental methods modified functions
          log_refresh("Finding experimental matches")
          self.find_experimental_matches()

        # Show the list of unmatched functions in both databases
        log_refresh("Finding unmatched functions")
        self.find_unmatched()

        # And, finally, show the list of best and partial matches and
        # register the hotkey for re-opening results
        self.show_choosers()
        self.register_menu()
        log("Done")
    finally:
      hide_wait_box()
  finally:
    # BUGFIX: the cursor used to leak when returning early on an empty
    # version table or on a version mismatch; close it on every path.
    cur.close()

  return True
#-----------------------------------------------------------------------
def remove_file(filename):
  """Delete *filename*, falling back to emptying it as a SQLite database.

  Fix for Bug #5: https://github.com/joxeankoret/diaphora/issues/5

  For some reason, in Windows, the handle to the SQLite database is
  not closed, and I really try to be sure that all the databases are
  detached, no cursor is leaked, etc... So, in case we cannot remove
  the database file because it's still being used by IDA in Windows
  for some unknown reason, just drop the database's tables and after
  that continue normally.
  """
  try:
    os.remove(filename)
  except OSError:
    # Narrowed from a bare except: os.remove only raises OSError (and
    # subclasses) for filesystem failures.
    tables = ["functions", "program", "program_data", "version",
              "instructions", "basic_blocks", "bb_relations",
              "bb_instructions", "function_bblocks"]
    with sqlite3.connect(filename) as db:
      cur = db.cursor()
      try:
        for table in tables:
          # BUGFIX: the original created `cur` but issued the drops via
          # db.execute, leaving the explicit cursor unused.
          cur.execute("drop table if exists %s" % table)
      finally:
        cur.close()
class BinDiffOptions:
  """Container for every tunable option of the export/diff process.

  Options can be supplied as keyword arguments; anything omitted falls
  back to a default derived from the currently open IDA database.
  """
  def __init__(self, **kwargs):
    total_functions = len(list(Functions()))
    # Default export target: <idb path>.sqlite next to the IDB.
    self.file_out = kwargs.get('file_out', os.path.splitext(GetIdbPath())[0] + ".sqlite")
    self.file_in = kwargs.get('file_in', '')
    self.use_decompiler = kwargs.get('use_decompiler', True)
    self.unreliable = kwargs.get('unreliable', True)
    self.slow = kwargs.get('slow', True)
    # Enable, by default, relaxed calculations on difference ratios for
    # 'big' databases (>20k functions)
    self.relax = kwargs.get('relax', total_functions > 20000)
    if self.relax:
      Warning(MSG_RELAXED_RATIO_ENABLED)
    self.experimental = kwargs.get('experimental', False)
    # Restrict the export to the IDB's current address range by default.
    self.min_ea = kwargs.get('min_ea', MinEA())
    self.max_ea = kwargs.get('max_ea', MaxEA())
    self.ida_subs = kwargs.get('ida_subs', True)
    self.ignore_sub_names = kwargs.get('ignore_sub_names', True)
    self.ignore_all_names = kwargs.get('ignore_all_names', False)
    self.ignore_small_functions = kwargs.get('ignore_small_functions', False)
    # Enable, by default, exporting only function summaries for huge dbs.
    self.func_summaries_only = kwargs.get('func_summaries_only', total_functions > 100000)
#-----------------------------------------------------------------------
def is_ida_file(filename):
  """Return True if *filename* has an IDA database/temporary extension.

  Used to refuse selecting IDA's own files (.idb/.i64 databases and the
  .til/.id0/.id1/.nam components) instead of exported SQLite databases.
  The check is case-insensitive.
  """
  # str.endswith accepts a tuple of suffixes: one call instead of six
  # chained comparisons.
  return filename.lower().endswith(
      (".idb", ".i64", ".til", ".id0", ".id1", ".nam"))
#-----------------------------------------------------------------------
def _diff_or_export(use_ui, **options):
  """Export the current IDB to SQLite and, if requested, diff it.

  Args:
    use_ui: when True, show the options dialog before running.
    **options: keyword overrides for BinDiffOptions.

  Returns:
    The CBinDiff instance used, or None when validation fails or the
    user cancels.
  """
  global g_bindiff

  total_functions = len(list(Functions()))
  if GetIdbPath() == "" or total_functions == 0:
    Warning("No IDA database opened or no function in the database.\nPlease open an IDA database and create some functions before running this script.")
    return

  opts = BinDiffOptions(**options)
  if use_ui:
    # Let the user tweak the defaults in the setup form.
    x = CBinDiffExporterSetup()
    x.Compile()
    x.set_options(opts)
    if not x.Execute():
      # Dialog cancelled.
      return
    opts = x.get_options()

  # Basic sanity checks on the selected file names.
  if opts.file_out == opts.file_in:
    Warning("Both databases are the same file!")
    return
  elif opts.file_out == "" or len(opts.file_out) < 5:
    Warning("No output database selected or invalid filename. Please select a database file.")
    return
  elif is_ida_file(opts.file_in) or is_ida_file(opts.file_out):
    Warning("One of the selected databases is an IDA file. Please select only database files")
    return

  export = True
  if os.path.exists(opts.file_out):
    # -1 = cancel, 0 = no (reuse the existing export), 1 = overwrite.
    ret = askyn_c(0, "Export database already exists. Do you want to overwrite it?")
    if ret == -1:
      log("Cancelled")
      return

    if ret == 0:
      export = False

    if export:
      if g_bindiff is not None:
        g_bindiff = None
      remove_file(opts.file_out)
      log("Database %s removed" % repr(opts.file_out))

  try:
    bd = CBinDiff(opts.file_out)
    bd.use_decompiler_always = opts.use_decompiler
    bd.unreliable = opts.unreliable
    bd.slow_heuristics = opts.slow
    bd.relaxed_ratio = opts.relax
    bd.experimental = opts.experimental
    bd.min_ea = opts.min_ea
    bd.max_ea = opts.max_ea
    bd.ida_subs = opts.ida_subs
    bd.ignore_sub_names = opts.ignore_sub_names
    bd.ignore_all_names = opts.ignore_all_names
    bd.ignore_small_functions = opts.ignore_small_functions
    bd.function_summaries_only = opts.func_summaries_only
    # Scale row/time limits with the database size (per 20k functions).
    bd.max_processed_rows = MAX_PROCESSED_ROWS * max(total_functions / 20000, 1)
    bd.timeout = TIMEOUT_LIMIT * max(total_functions / 20000, 1)

    if export:
      # DIAPHORA_PROFILE enables cProfile instrumentation of the export.
      if os.getenv("DIAPHORA_PROFILE") is not None:
        log("*** Profiling export ***")
        import cProfile
        profiler = cProfile.Profile()
        profiler.runcall(bd.export)
        profiler.print_stats(sort="time")
      else:
        bd.export()
      log("Database exported")

    if opts.file_in != "":
      if os.getenv("DIAPHORA_PROFILE") is not None:
        log("*** Profiling diff ***")
        import cProfile
        profiler = cProfile.Profile()
        profiler.runcall(bd.diff, opts.file_in)
        profiler.print_stats(sort="time")
      else:
        bd.diff(opts.file_in)
  except:
    print("Error: %s" % sys.exc_info()[1])
    traceback.print_exc()

  # NOTE(review): if CBinDiff(...) itself raises, `bd` is unbound here
  # and this return raises NameError — confirm whether that is intended.
  return bd
#-----------------------------------------------------------------------
def diff_or_export_ui():
  """Run the export/diff workflow, showing the options dialog first."""
  return _diff_or_export(use_ui=True)
#-----------------------------------------------------------------------
def diff_or_export(**options):
  """Run the export/diff workflow without the UI, using *options*."""
  return _diff_or_export(use_ui=False, **options)
if __name__ == "__main__":
  if os.getenv("DIAPHORA_AUTO") is not None:
    # Batch mode: export the IDB without any UI and quit IDA. Intended
    # for driving IDA from the command line via environment variables.
    file_out = os.getenv("DIAPHORA_EXPORT_FILE")
    if file_out is None:
      raise Exception("No export file specified!")

    use_decompiler = os.getenv("DIAPHORA_USE_DECOMPILER")
    if use_decompiler is None:
      # NOTE(review): any non-empty value enables the decompiler — the
      # variable's content is assigned as-is, never parsed; confirm.
      use_decompiler = False

    # Wait for IDA's auto-analysis to finish before exporting.
    idaapi.autoWait()

    if os.path.exists(file_out):
      if g_bindiff is not None:
        g_bindiff = None
      remove_file(file_out)
      log("Database %s removed" % repr(file_out))

    bd = CBinDiff(file_out)
    bd.use_decompiler_always = use_decompiler
    bd.export()

    idaapi.qexit(0)
  else:
    # Interactive mode: show the options dialog.
    diff_or_export_ui()
| nihilus/diaphora | diaphora.py | Python | gpl-2.0 | 140,383 |
"""engine_config - command ``engine-config --all``
==================================================
This module provides access to ovirt-engine configuration parameters
by parsing output of command ``engine-config --all``.
"""
from collections import namedtuple
from operator import itemgetter
from .. import parser, get_active_lines, CommandParser
from insights.specs import Specs
@parser(Specs.engine_config_all)
class EngineConfigAll(CommandParser):
    """Parsing output of command ``engine-config --all``

    The parser tries its best to get `value` & `version` for a specified
    `keyword`. At the moment it works well for output which has
    `keyword`, `value` & `version` in a single line. It ignores
    keywords where it fails. It skips (rather, fails on) any `keyword`
    having multi-line output.

    Typical output of ``engine-config --all`` command is::

        MaxRerunVmOnVdsCount: 3 version: general
        MaxStorageVdsTimeoutCheckSec: 30 version: general
        ClusterRequiredRngSourcesDefault: version: 3.6
        ClusterRequiredRngSourcesDefault: version: 4.0
        ClusterRequiredRngSourcesDefault: version: 4.1
        HotUnplugCpuSupported: {"x86":"false","ppc":"false"} version: 3.6
        HotUnplugCpuSupported: {"x86":"false","ppc":"false"} version: 4.0
        HotUnplugCpuSupported: {"x86":"true","ppc":"true"} version: 4.1

    Examples:
        >>> from insights.parsers.engine_config import EngineConfigAll
        >>> from insights.tests import context_wrap
        >>> output = EngineConfigAll(context_wrap(OUTPUT))
        >>> 'MaxRerunVmOnVdsCount' in output
        True
        >>> output['MaxRerunVmOnVdsCount']
        ['3']
        >>> output.get('MaxRerunVmOnVdsCount')
        ['3']
        >>> output['HotUnplugCpuSupported']
        ['{"x86":"false","ppc":"false"}', '{"x86":"false","ppc":"false"}', '{"x86":"true","ppc":"true"}']
        >>> output['ClusterRequiredRngSourcesDefault']
        []
        >>> output.head('HotUnplugCpuSupported')
        '{"x86":"false","ppc":"false"}'
        >>> output.last('HotUnplugCpuSupported')
        '{"x86":"true","ppc":"true"}'
        >>> output.get_version('HotUnplugCpuSupported')
        ['3.6', '4.0', '4.1']
        >>> 'IDoNotExit' in output
        False
        >>> output['IDoNotExit']
        []
        >>> output.get('IDoNotExit')
        []
        >>> output.get_version('IDoNotExit')
        []
        >>> output.head('IDoNotExist')
        >>> output.last('IDoNotExist')

    Attributes:
        fields (list): List of `KeyValue` namedtupules for
            each line in the configuration file.

        keywords (set): Set of keywords present in the configuration
            file, each keyword has been converted to lowercase.
    """

    keyvalue = namedtuple('KeyValue',
                          ['keyword', 'value', 'version', 'kw_lower'])
    """namedtuple: Represent name value pair as a namedtuple with case."""

    def parse_content(self, content):
        """Parse each active line for keyword, values & version.

        Args:
            content (list): Output of command ``engine-config --all``.
        """
        self.fields = []
        for line in get_active_lines(content):
            try:
                # Lines look like "Keyword: value version: X"; grab the
                # first, second and last whitespace-separated tokens.
                key, val, ver = itemgetter(0, 1, -1)(line.split(' '))
                self.fields.append(self.keyvalue(key.strip(':'), val, ver, key.strip(':').lower()))  # noqa
            except Exception:
                # Narrowed from a bare except so SystemExit and
                # KeyboardInterrupt are no longer swallowed; lines with
                # too few tokens (IndexError) are simply skipped.
                # TODO: Log an exception.
                pass
        self.keywords = {kw.kw_lower for kw in self.fields}

    def __contains__(self, keyword):
        return keyword.lower() in self.keywords

    def __iter__(self):
        return iter(self.fields)

    def __getitem__(self, keyword):
        return self.get(keyword)

    def get(self, keyword):
        """A get value for keyword specified. A "dictionary like" method.

        Example:
            >>> output.get('MaxStorageVdsTimeoutCheckSec')
            ['30']

        Args:
            keyword (str): A key. For ex. `HotUnplugCpuSupported`.

        Returns:
            list: Values associated with a keyword. Returns an empty
            list if, all the values are empty or `keyword` does not
            exist.
        """
        kw = keyword.lower()
        if kw in self.keywords:
            return [kv.value for kv in self.fields if kv.kw_lower == kw if kv.value]
        return []

    def head(self, keyword):
        """Get first element from values(list).

        Example:
            >>> output['HotUnplugCpuSupported']
            ['{"x86":"false","ppc":"false"}', '{"x86":"false","ppc":"false"}', '{"x86":"true","ppc":"true"}']
            >>> output.head('HotUnplugCpuSupported')
            '{"x86":"false","ppc":"false"}'

        Args:
            keyword (str): A key. For ex. `HotUnplugCpuSupported`.

        Returns:
            str: First element from values(list) associated with a keyword else None
        """
        values = self.__getitem__(keyword)
        if values:
            return values[0]

    def last(self, keyword):
        """Get last element from values(list).

        Example:
            >>> output['HotUnplugCpuSupported']
            ['{"x86":"false","ppc":"false"}', '{"x86":"false","ppc":"false"}', '{"x86":"true","ppc":"true"}']
            >>> output.last('HotUnplugCpuSupported')
            '{"x86":"true","ppc":"true"}'

        Args:
            keyword (str): A key. For ex. `HotUnplugCpuSupported`.

        Returns:
            str: Last element from values(list) associated with a keyword else None.
        """
        values = self.__getitem__(keyword)
        if values:
            return values[-1]

    def get_version(self, keyword):
        """Get versions associated with a key.

        Typical output is ``engine-config --all`` command::

            MaxStorageVdsTimeoutCheckSec: 30 version: general
            HotUnplugCpuSupported: {"x86":"false","ppc":"false"} version: 3.6
            HotUnplugCpuSupported: {"x86":"false","ppc":"false"} version: 4.0
            HotUnplugCpuSupported: {"x86":"true","ppc":"true"} version: 4.1

        Examples:
            >>> output.get_version('MaxStorageVdsTimeoutCheckSec')
            ['general']
            >>> output.get_version('HotUnplugCpuSupported')
            ['3.6', '4.0', '4.1']

        Args:
            keyword (str): A key. For ex. `HotUnplugCpuSupported`.

        Returns:
            list: Versions associated with a keyword. Returns an empty
            list if, all the versions are empty or `keyword` does not
            exist.
        """
        kw = keyword.lower()
        if kw in self.keywords:
            return [kv.version for kv in self.fields if kv.kw_lower == kw if kv.version]
        return []
| RedHatInsights/insights-core | insights/parsers/engine_config.py | Python | apache-2.0 | 6,803 |
#!/usr/bin/env python
""" UCS worker class for pyflex
"""
from worker import FlexWorker
from functions.functions_ucs import UcsFunctions
from UcsSdk import *
class NexusWorker(FlexWorker):
    """
    A child worker class that generates and
    installs Nexus configurations
    """
    def startworker(self):
        """ Starts this worker.

        Logs in to UCS Manager, renders one Nexus 5K configuration
        snippet per switch from the Jinja2 templates, and pushes each
        snippet to its switch.
        """
        # Connect to UCSM through the UCS SDK handle.
        handle = UcsHandle()
        # Credentials/addresses come from the 'nexus' auth section of config.
        nxauth = self.config['auth']['nexus']
        # NOTE(review): login uses the 'switcha' key while the transmit
        # calls below use 'a' and 'b' -- confirm those keys all exist in
        # the configuration and that logging in via switch A is intended.
        handle.Login(
            nxauth['switcha'],
            nxauth['user'],
            nxauth['pass']
        )
        fxns = UcsFunctions(handle, self.config)
        # Render per-switch config snippets from the templates.
        configa = fxns.gen_snippet('../templates/N5K-A.j2')
        configb = fxns.gen_snippet('../templates/N5K-B.j2')
        # NOTE(review): transmit_config is not defined or imported by name in
        # this module; presumably it comes from `from UcsSdk import *` --
        # verify, otherwise this raises NameError at runtime.
        transmit_config(nxauth['a'], configa)
        transmit_config(nxauth['b'], configb)
        #TODO: Need to do some clean-up tasks below, or figure out how to slipstream things into the initial config.
        # For instance, the WWPN aliases and zoning - that shouldn't be in the template at all. That should be
        # generated from a smaller template and transmitted using dynamically retrieved data from UCS
from utils import getJson
from utils import getText
| inbloom/legacy-projects | lri-middleware/utils/__init__.py | Python | apache-2.0 | 52 |
def test(str):
str = str.replace(' ', '')
rev = str[::-1]
return 'true' if str == rev else 'false'
# Parenthesized print works under both Python 2 and Python 3 for a
# single argument; the bare print statement is Python-2 only.
print(test("never odd or even"))
| DevCouch/coderbyte_python | easy/palindrome.py | Python | gpl-2.0 | 149 |
#$#HEADER-START
# vim:set expandtab ts=4 sw=4 ai ft=python:
#
# Reactor Configuration Event Engine
#
# Copyright (C) 2016 Brandon Gillespie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#$#HEADER-END
# pylint: disable=missing-docstring
import sys
import os
import re
#from builtins import input # pylint: disable=redefined-builtin
import reactor
from reactor.backend import Core
from reactor.action import Action
################################################################################
class ControlCli(reactor.Base):
    """Command-line control helpers for reactor.

    Each ``*_cli`` method implements one interactive subcommand; settings
    are read from and persisted to ``self.cfg`` (provided by reactor.Base).
    """
    ############################################################
    # pylint: disable=super-init-not-called
    def __init__(self, base=None):
        # Inherit state (cfg, logging hooks, ...) from an existing Base
        # instance instead of calling Base.__init__.
        if base:
            reactor.Base.__inherit__(self, base)
        # Cache the configured service URL, when set and non-empty.
        if 'REACTOR_URL' in self.cfg.keys() and len(self.cfg['REACTOR_URL']):
            self.base_url = self.cfg['REACTOR_URL']
    ############################################################
    def update_cli(self): # pylint: disable=unused-argument
        """Interactively confirm, then update reactor in its basedir."""
        # todo: current version printed
        answer = input("Update to latest version of reactor? [yes] ")
        # Empty answer defaults to "yes".
        if answer.lower() not in ["yes", "y", ""]:
            sys.exit(0)
        if not reactor.BASEDIR:
            sys.exit("Unable to find reactor basedir!")
        os.chdir(reactor.BASEDIR)
        action = Action(base=self)
        action.do("update")
    ############################################################
    def wizard_cli(self, argv, args): # pylint: disable=unused-argument
        """Prompt for (or read from SET_REACTOR_* env vars) each config
        value, then save the configuration."""
        for key in 'URL', 'APIKEY', 'TOKEN':
            if 'SET_REACTOR_' + key in os.environ.keys():
                self.cfg['REACTOR_' + key] = os.environ['SET_REACTOR_' + key]
            else:
                self.cfg['REACTOR_' + key] = input('REACTOR_' + key + ': ')
        # do this one without terminal echo
        os.system("stty -echo")
        if 'SET_REACTOR_SECRET'in os.environ.keys():
            self.cfg['REACTOR_SECRET'] = os.environ['SET_REACTOR_SECRET']
        else:
            self.cfg['REACTOR_SECRET'] = input('REACTOR_SECRET: ')
        os.system("stty echo")
        self.NOTIFY("")
        self.cfg_save()
    ############################################################
    def set_cli(self, argv, args): # pylint: disable=unused-argument
        """Set a single config parameter from a ``name=value`` argument;
        prompt for the value when it is omitted."""
        match = re.match(r'^([a-zA-Z_-]+)\s*=\s*(.*)$', " ".join(argv))
        if not match:
            self.NOTIFY("set var=value")
            return
        param = match.group(1)
        if len(match.group(2)):
            value = match.group(2)
        else:
            value = input("Input Value: ")
        self.cfg[param] = value
        self.cfg_save()
    ############################################################
    def unset_cli(self, argv, args): # pylint: disable=unused-argument
        """Remove a config parameter named by the first argument."""
        try:
            key = argv[0]
            del self.cfg[key]
            self.cfg_save()
        except KeyError:
            # `key` is always bound here: KeyError can only come from the
            # `del` after the argv[0] lookup succeeded.
            self.NOTIFY("parameter '" + key + "' not found.")
        except IndexError:
            self.NOTIFY("missing parameter name to unset.")
    ############################################################
    def list_cli(self, argv, args): # pylint: disable=unused-argument
        """dump the reactor config"""
        altcfg = self.cfg
        #altcfg['REACTOR_SECRET'] = 'xxxxxxxxxxxxx'
        #altcfg['REACTOR_APIKEY'] = 'xxxxxxxxxxxxx'
        # NOTE(review): secrets are printed in clear text; the masking
        # lines above are commented out -- confirm that is intended.
        for line in altcfg:
            self.NOTIFY(line + "=" + altcfg[line])
    ############################################################
    def apikey_cli(self, argv, args, cli):
        """Dispatch apikey subcommands (list/create/delete) after
        prompting for an administrative token."""
        self.NOTIFY("Specify Administrative Token (end with newline):")
        admkey = input("Admin API Token: ")
        self.NOTIFY("")
        action = args.get('action')
        target = " ".join(argv)
        dbo = Core(base=self)
        try:
            if action in ('delete', 'create'):
                if not len(target):
                    cli.fail("No target specified for " + action)
                # Dispatch to apikey_cli__create / apikey_cli__delete.
                getattr(self, "apikey_cli__" + action)(dbo, admkey, target, cli)
            else:
                self.apikey_cli__list(dbo, admkey, cli)
        except reactor.CannotContinueError as err:
            err = str(err)
            # Annotate HTTP 401 errors for readability.
            if ": 401" in err:
                err = err + " Unauthorized"
            self.ABORT(err)
    ############################################################
    # pylint: disable=no-self-use,unused-argument
    def apikey_cli__delete(self, dbo, admkey, target, cli):
        """Delete the apikey object named by *target*."""
        dbo.delete_object("apikey", target, apikey=admkey)
    ############################################################
    def apikey_cli__list(self, dbo, admkey, cli): # pylint: disable=unused-argument
        """Print a table of all apikey objects."""
        self.NOTIFY("{0:20} {1:24} {2:24} {3}".format("Name", "Id", "CreatedAt", "Scope"))
        for obj in dbo.list_objects("apikey", apikey=admkey):
            self.NOTIFY("{name:20} {id:24} {createdAt:24} {scope}".format(**obj))
    ############################################################
    def apikey_cli__create(self, dbo, admkey, target, cli):
        """Create an apikey from a ``name=scope`` target string."""
        keyval = target.split("=", 1)
        if len(keyval) != 2:
            # NOTE(review): assumes cli.fail() raises or exits; otherwise
            # the unpack below would raise ValueError for malformed input.
            cli.fail("Must make assignment as {name}={scope}")
        (name, scope) = keyval
        if scope in ["super", "sensitive", "write", "read"]:
            res = dbo.create_object("apikey",
                                    {"name":name, "scope":scope},
                                    apikey=admkey)
            self.NOTIFY("Created name={0}, id={1}, secret apikey:\n\n{2}\n"
                        .format(name, res['id'],
                                res.get('apikey', res.get('token', 'n/a'))))
        else:
            cli.fail("Scope is missing or wrong")
| srevenant/reactor | src/reactor/control.py | Python | agpl-3.0 | 6,339 |
from security.users_repository import UsersRepository
import uuid
class Users(object):
    """Service layer for user management.

    Thin wrapper around :class:`UsersRepository` that adds a generated
    UUID to each new user before persisting it.
    """
    def __init__(self):
        # One repository instance shared by all operations.
        self.users_repository = UsersRepository()
    def get_all_users(self):
        """Return all stored users (delegates to the repository)."""
        return self.users_repository.get_all_users()
    def get_user(self, name):
        """Return the user identified by *name*."""
        return self.users_repository.get_user(name)
    def create_user(self, user):
        """Persist *user* after tagging it with a fresh random UUID.

        Note: mutates the passed-in dict by adding a 'uuid' key.
        """
        user['uuid'] = str(uuid.uuid4())
        return self.users_repository.create_user(user)
    def delete_user(self, name):
        """Delete the user identified by *name*."""
        return self.users_repository.delete_user(name)
    def update_user(self, name, user):
        """Update the stored user *name* with the data in *user*."""
        return self.users_repository.update_user(name, user)
from edmunds.support.serviceprovider import ServiceProvider
from werkzeug.exceptions import default_exceptions
from edmunds.exceptions.handler import Handler
class ExceptionsServiceProvider(ServiceProvider):
    """
    Exceptions Service Provider

    Installs the configured exception handler on the application and
    registers it for every default HTTP exception plus bare Exception.
    """
    def register(self):
        """
        Register the service provider
        """
        # Construct and define handler; the handler class is configurable
        # and defaults to the framework Handler.
        handler_class = self.app.config('app.exceptions.handler', Handler)
        handler = handler_class(self.app)
        self.app.extensions['edmunds.exceptions.handler'] = handler
        # Add all the exception to handle (every werkzeug HTTP exception,
        # plus Exception as a catch-all).
        exceptions = list(default_exceptions.values())
        exceptions.append(Exception)
        # Register each exception. The same closure is registered for every
        # class; it looks the handler up from app.extensions at call time,
        # so it does not need to capture exception_class.
        for exception_class in exceptions:
            @self.app.errorhandler(exception_class)
            def handle_exception(exception):
                """
                Handle an exception
                :param exception: The exception
                :type exception: Exception
                :return: The response
                """
                self.app.extensions['edmunds.exceptions.handler'].report(exception)
                return self.app.extensions['edmunds.exceptions.handler'].render(exception)
| LowieHuyghe/edmunds | edmunds/exceptions/exceptionsserviceprovider.py | Python | apache-2.0 | 1,277 |
#----------------------------------------------------------------------
# Copyright 2012-2020 EPCC, The University of Edinburgh
#
# This file is part of bolt.
#
# bolt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# bolt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bolt. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
| aturner-epcc/bolt | modules/__init__.py | Python | gpl-3.0 | 843 |
'''Multiple Testing and P-Value Correction
Author: Josef Perktold
License: BSD-3
'''
from statsmodels.compat.python import range
from statsmodels.compat.collections import OrderedDict
import numpy as np
#==============================================
#
# Part 1: Multiple Tests and P-Value Correction
#
#==============================================
def _ecdf(x):
'''no frills empirical cdf used in fdrcorrection
'''
nobs = len(x)
return np.arange(1,nobs+1)/float(nobs)
# Canonical short method codes mapped to their human-readable names.
multitest_methods_names = {'b': 'Bonferroni',
                           's': 'Sidak',
                           'h': 'Holm',
                           'hs': 'Holm-Sidak',
                           'sh': 'Simes-Hochberg',
                           'ho': 'Hommel',
                           'fdr_bh': 'FDR Benjamini-Hochberg',
                           'fdr_by': 'FDR Benjamini-Yekutieli',
                           'fdr_tsbh': 'FDR 2-stage Benjamini-Hochberg',
                           'fdr_tsbky': 'FDR 2-stage Benjamini-Krieger-Yekutieli',
                           'fdr_gbs': 'FDR adaptive Gavrilov-Benjamini-Sarkar'
                           }
# Accepted spellings for each method; the first entry of each sublist is
# the canonical code the aliases resolve to.
_alias_list = [['b', 'bonf', 'bonferroni'],
               ['s', 'sidak'],
               ['h', 'holm'],
               ['hs', 'holm-sidak'],
               ['sh', 'simes-hochberg'],
               ['ho', 'hommel'],
               ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp'],
               ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr'],
               ['fdr_tsbh', 'fdr_2sbh'],
               ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage'],
               ['fdr_gbs']
               ]
# Flattened alias -> canonical-code lookup, built from _alias_list.
# OrderedDict preserves the declaration order of the aliases.
multitest_alias = OrderedDict()
for m in _alias_list:
    multitest_alias[m[0]] = m[0]
    for a in m[1:]:
        multitest_alias[a] = m[0]
def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False,
                  returnsorted=False):
    '''test results and p-value correction for multiple tests
    Parameters
    ----------
    pvals : array_like
        uncorrected p-values
    alpha : float
        FWER, family-wise error rate, e.g. 0.1
    method : string
        Method used for testing and adjustment of pvalues. Can be either the
        full name or initial letters. Available methods are ::
        `bonferroni` : one-step correction
        `sidak` : one-step correction
        `holm-sidak` : step down method using Sidak adjustments
        `holm` : step-down method using Bonferroni adjustments
        `simes-hochberg` : step-up method (independent)
        `hommel` : closed method based on Simes tests (non-negative)
        `fdr_bh` : Benjamini/Hochberg (non-negative)
        `fdr_by` : Benjamini/Yekutieli (negative)
        `fdr_tsbh` : two stage fdr correction (non-negative)
        `fdr_tsbky` : two stage fdr correction (non-negative)
    is_sorted : bool
        If False (default), the p_values will be sorted, but the corrected
        pvalues are in the original order. If True, then it assumed that the
        pvalues are already sorted in ascending order.
    returnsorted : bool
        not tested, return sorted p-values instead of original sequence
    Returns
    -------
    reject : array, boolean
        true for hypothesis that can be rejected for given alpha
    pvals_corrected : array
        p-values corrected for multiple tests
    alphacSidak: float
        corrected alpha for Sidak method
    alphacBonf: float
        corrected alpha for Bonferroni method
    Notes
    -----
    Except for 'fdr_twostage', the p-value correction is independent of the
    alpha specified as argument. In these cases the corrected p-values
    can also be compared with a different alpha. In the case of 'fdr_twostage',
    the corrected p-values are specific to the given alpha, see
    ``fdrcorrection_twostage``.
    all corrected p-values now tested against R.
    insufficient "cosmetic" tests yet
    The 'fdr_gbs' procedure is not verified against another package, p-values
    are derived from scratch and are not derived in the reference. In Monte
    Carlo experiments the method worked correctly and maintained the false
    discovery rate.
    All procedures that are included, control FWER or FDR in the independent
    case, and most are robust in the positively correlated case.
    `fdr_gbs`: high power, fdr control for independent case and only small
    violation in positively correlated case
    **Timing**:
    Most of the time with large arrays is spent in `argsort`. When
    we want to calculate the p-value for several methods, then it is more
    efficient to presort the pvalues, and put the results back into the
    original order outside of the function.
    Method='hommel' is very slow for large arrays, since it requires the
    evaluation of n partitions, where n is the number of p-values.
    there will be API changes.
    References
    ----------
    '''
    import gc
    pvals = np.asarray(pvals)
    alphaf = alpha  # Notation ?
    # All step methods below assume ascending p-values; sort (and remember
    # the permutation) unless the caller guarantees sorted input.
    if not is_sorted:
        sortind = np.argsort(pvals)
        pvals = np.take(pvals, sortind)
    ntests = len(pvals)
    alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
    alphacBonf = alphaf / float(ntests)
    if method.lower() in ['b', 'bonf', 'bonferroni']:
        # One-step Bonferroni: multiply p-values by the number of tests.
        reject = pvals <= alphacBonf
        pvals_corrected = pvals * float(ntests)
    elif method.lower() in ['s', 'sidak']:
        # One-step Sidak correction.
        reject = pvals <= alphacSidak
        pvals_corrected = 1 - np.power((1. - pvals), ntests)
    elif method.lower() in ['hs', 'holm-sidak']:
        # Step-down Holm procedure with Sidak adjustments.
        alphacSidak_all = 1 - np.power((1. - alphaf),
                                       1./np.arange(ntests, 0, -1))
        notreject = pvals > alphacSidak_all
        del alphacSidak_all
        nr_index = np.nonzero(notreject)[0]
        if nr_index.size == 0:
            # nonreject is empty, all rejected
            notrejectmin = len(pvals)
        else:
            notrejectmin = np.min(nr_index)
        # Once one hypothesis fails, all larger p-values fail too.
        notreject[notrejectmin:] = True
        reject = ~notreject
        del notreject
        pvals_corrected_raw = 1 - np.power((1. - pvals),
                                           np.arange(ntests, 0, -1))
        # Enforce monotonicity of corrected p-values.
        pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
        del pvals_corrected_raw
    elif method.lower() in ['h', 'holm']:
        # Step-down Holm procedure with Bonferroni adjustments.
        notreject = pvals > alphaf / np.arange(ntests, 0, -1)
        nr_index = np.nonzero(notreject)[0]
        if nr_index.size == 0:
            # nonreject is empty, all rejected
            notrejectmin = len(pvals)
        else:
            notrejectmin = np.min(nr_index)
        notreject[notrejectmin:] = True
        reject = ~notreject
        pvals_corrected_raw = pvals * np.arange(ntests, 0, -1)
        pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
        del pvals_corrected_raw
        gc.collect()
    elif method.lower() in ['sh', 'simes-hochberg']:
        # Step-up Simes-Hochberg procedure.
        alphash = alphaf / np.arange(ntests, 0, -1)
        reject = pvals <= alphash
        rejind = np.nonzero(reject)
        if rejind[0].size > 0:
            rejectmax = np.max(np.nonzero(reject))
            reject[:rejectmax] = True
        pvals_corrected_raw = np.arange(ntests, 0, -1) * pvals
        # Step-up corrections are made monotone from the largest p-value down.
        pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
        del pvals_corrected_raw
    elif method.lower() in ['ho', 'hommel']:
        # Hommel's closed testing procedure; O(n^2) in the number of tests.
        # we need a copy because we overwrite it in a loop
        a = pvals.copy()
        for m in range(ntests, 1, -1):
            cim = np.min(m * pvals[-m:] / np.arange(1,m+1.))
            a[-m:] = np.maximum(a[-m:], cim)
            a[:-m] = np.maximum(a[:-m], np.minimum(m * pvals[:-m], cim))
        pvals_corrected = a
        reject = a <= alphaf
    elif method.lower() in ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
                                                method='indep',
                                                is_sorted=True)
    elif method.lower() in ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
                                                method='n',
                                                is_sorted=True)
    elif method.lower() in ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,
                                                         method='bky',
                                                         is_sorted=True)[:2]
    elif method.lower() in ['fdr_tsbh', 'fdr_2sbh']:
        # delegate, call with sorted pvals
        reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,
                                                         method='bh',
                                                         is_sorted=True)[:2]
    elif method.lower() in ['fdr_gbs']:
        #adaptive stepdown in Gavrilov, Benjamini, Sarkar, Annals of Statistics 2009
##        notreject = pvals > alphaf / np.arange(ntests, 0, -1) #alphacSidak
##        notrejectmin = np.min(np.nonzero(notreject))
##        notreject[notrejectmin:] = True
##        reject = ~notreject
        ii = np.arange(1, ntests + 1)
        q = (ntests + 1. - ii)/ii * pvals / (1. - pvals)
        pvals_corrected_raw = np.maximum.accumulate(q) #up requirementd
        pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
        del pvals_corrected_raw
        reject = pvals_corrected <= alpha
    else:
        raise ValueError('method not recognized')
    # Corrected p-values are clipped to the [0, 1] interval.
    if not pvals_corrected is None: #not necessary anymore
        pvals_corrected[pvals_corrected>1] = 1
    if is_sorted or returnsorted:
        return reject, pvals_corrected, alphacSidak, alphacBonf
    else:
        # Undo the initial sort so results line up with the input order.
        pvals_corrected_ = np.empty_like(pvals_corrected)
        pvals_corrected_[sortind] = pvals_corrected
        del pvals_corrected
        reject_ = np.empty_like(reject)
        reject_[sortind] = reject
        return reject_, pvals_corrected_, alphacSidak, alphacBonf
def fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False):
    '''pvalue correction for false discovery rate
    This covers Benjamini/Hochberg for independent or positively correlated and
    Benjamini/Yekutieli for general or negatively correlated tests. Both are
    available in the function multipletests, as method=`fdr_bh`, resp. `fdr_by`.
    Parameters
    ----------
    pvals : array_like
        set of p-values of the individual tests.
    alpha : float
        error rate
    method : {'indep', 'negcorr')
    Returns
    -------
    rejected : array, bool
        True if a hypothesis is rejected, False if not
    pvalue-corrected : array
        pvalues adjusted for multiple hypothesis testing to limit FDR
    Notes
    -----
    If there is prior information on the fraction of true hypothesis, then alpha
    should be set to alpha * m/m_0 where m is the number of tests,
    given by the p-values, and m_0 is an estimate of the true hypothesis.
    (see Benjamini, Krieger and Yekuteli)
    The two-step method of Benjamini, Krieger and Yekutiel that estimates the number
    of false hypotheses will be available (soon).
    Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh and 'n' for
    fdr_by.
    '''
    pvals = np.asarray(pvals)
    # Work on ascending p-values; remember the permutation to restore order.
    if not is_sorted:
        pvals_sortind = np.argsort(pvals)
        pvals_sorted = np.take(pvals, pvals_sortind)
    else:
        pvals_sorted = pvals  # alias
    if method in ['i', 'indep', 'p', 'poscorr']:
        # Benjamini/Hochberg: compare against rank/n thresholds.
        ecdffactor = _ecdf(pvals_sorted)
    elif method in ['n', 'negcorr']:
        # Benjamini/Yekutieli: extra harmonic-number factor for general
        # dependence.
        cm = np.sum(1./np.arange(1, len(pvals_sorted)+1))   #corrected this
        ecdffactor = _ecdf(pvals_sorted) / cm
##    elif method in ['n', 'negcorr']:
##        cm = np.sum(np.arange(len(pvals)))
##        ecdffactor = ecdf(pvals_sorted)/cm
    else:
        raise ValueError('only indep and necorr implemented')
    reject = pvals_sorted <= ecdffactor*alpha
    if reject.any():
        # Step-up: everything below the largest rejected p-value is rejected.
        rejectmax = max(np.nonzero(reject)[0])
        reject[:rejectmax] = True
    pvals_corrected_raw = pvals_sorted / ecdffactor
    # Make corrected p-values monotone from the largest down, then clip to 1.
    pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
    del pvals_corrected_raw
    pvals_corrected[pvals_corrected>1] = 1
    if not is_sorted:
        # Scatter results back into the caller's original order.
        pvals_corrected_ = np.empty_like(pvals_corrected)
        pvals_corrected_[pvals_sortind] = pvals_corrected
        del pvals_corrected
        reject_ = np.empty_like(reject)
        reject_[pvals_sortind] = reject
        return reject_, pvals_corrected_
    else:
        return reject, pvals_corrected
def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False,
                           is_sorted=False):
    '''(iterated) two stage linear step-up procedure with estimation of number of true
    hypotheses
    Benjamini, Krieger and Yekuteli, procedure in Definition 6
    Parameters
    ----------
    pvals : array_like
        set of p-values of the individual tests.
    alpha : float
        error rate
    method : {'bky', 'bh')
        see Notes for details
        'bky' : implements the procedure in Definition 6 of Benjamini, Krieger
           and Yekuteli 2006
        'bh' : implements the two stage method of Benjamini and Hochberg
    iter ; bool
    Returns
    -------
    rejected : array, bool
        True if a hypothesis is rejected, False if not
    pvalue-corrected : array
        pvalues adjusted for multiple hypotheses testing to limit FDR
    m0 : int
        ntest - rej, estimated number of true hypotheses
    alpha_stages : list of floats
        A list of alphas that have been used at each stage
    Notes
    -----
    The returned corrected p-values are specific to the given alpha, they
    cannot be used for a different alpha.
    The returned corrected p-values are from the last stage of the fdr_bh
    linear step-up procedure (fdrcorrection0 with method='indep') corrected
    for the estimated fraction of true hypotheses.
    This means that the rejection decision can be obtained with
    ``pval_corrected <= alpha``, where ``alpha`` is the origianal significance
    level.
    (Note: This has changed from earlier versions (<0.5.0) of statsmodels.)
    BKY described several other multi-stage methods, which would be easy to implement.
    However, in their simulation the simple two-stage method (with iter=False) was the
    most robust to the presence of positive correlation
    TODO: What should be returned?
    '''
    pvals = np.asarray(pvals)
    # Sort ascending; remember the permutation so results can be unsorted.
    if not is_sorted:
        pvals_sortind = np.argsort(pvals)
        pvals = np.take(pvals, pvals_sortind)
    ntests = len(pvals)
    if method == 'bky':
        # BKY Definition 6 uses a deflated first-stage alpha.
        fact = (1.+alpha)
        alpha_prime = alpha / fact
    elif method == 'bh':
        fact = 1.
        alpha_prime = alpha
    else:
        raise ValueError("only 'bky' and 'bh' are available as method")
    alpha_stages = [alpha_prime]
    # Stage 1: standard BH step-up at alpha_prime.
    rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_prime, method='indep',
                                   is_sorted=True)
    r1 = rej.sum()
    if (r1 == 0) or (r1 == ntests):
        # Nothing or everything rejected: no second stage possible.
        # NOTE(review): this early return does not restore the original
        # (unsorted) order when is_sorted=False -- confirm; later
        # statsmodels versions changed this path.
        return rej, pvalscorr * fact, ntests - r1, alpha_stages
    ri_old = r1
    while True:
        # Stage 2 (iterated if iter=True): re-run BH with alpha scaled by
        # the estimated fraction of true null hypotheses.
        ntests0 = 1.0 * ntests - ri_old
        alpha_star = alpha_prime * ntests / ntests0
        alpha_stages.append(alpha_star)
        #print ntests0, alpha_star
        rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_star, method='indep',
                                       is_sorted=True)
        ri = rej.sum()
        if (not iter) or ri == ri_old:
            break
        elif ri < ri_old:
            # prevent cycles and endless loops
            raise RuntimeError(" oops - shouldn't be here")
        ri_old = ri
    # make adjustment to pvalscorr to reflect estimated number of Non-Null cases
    # decision is then pvalscorr < alpha (or <=)
    pvalscorr *= ntests0 * 1.0 / ntests
    if method == 'bky':
        pvalscorr *= (1. + alpha)
    if not is_sorted:
        # Scatter results back into the caller's original order.
        pvalscorr_ = np.empty_like(pvalscorr)
        pvalscorr_[pvals_sortind] = pvalscorr
        del pvalscorr
        reject = np.empty_like(rej)
        reject[pvals_sortind] = rej
        return reject, pvalscorr_, ntests - ri, alpha_stages
    else:
        return rej, pvalscorr, ntests - ri, alpha_stages
| rgommers/statsmodels | statsmodels/stats/multitest.py | Python | bsd-3-clause | 16,540 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from horizon.contrib import bootstrap_datepicker
from django.conf import settings
from django import template
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from horizon.base import Horizon # noqa
from horizon import conf
register = template.Library()
@register.filter
def has_permissions(user, component):
    """Return True when *user* satisfies the permission requirements
    declared on *component* (an empty set when none are declared).
    """
    required = getattr(component, 'permissions', set())
    return user.has_perms(required)
@register.filter
def has_permissions_on_list(components, user):
    """Filter *components* down to those the user has permissions for."""
    allowed = []
    for component in components:
        if has_permissions(user, component):
            allowed.append(component)
    return allowed
@register.inclusion_tag('horizon/_accordion_nav.html', takes_context=True)
def horizon_nav(context):
    """Generates the accordion navigation: every accessible dashboard with
    its panel groups and the panels the current user may see.
    """
    if 'request' not in context:
        return {}
    current_dashboard = context['request'].horizon.get('dashboard', None)
    current_panel = context['request'].horizon.get('panel', None)
    dashboards = []
    for dash in Horizon.get_dashboards():
        panel_groups = dash.get_panel_groups()
        non_empty_groups = []
        for group in panel_groups.values():
            allowed_panels = []
            for panel in group:
                # panel.nav may be a callable taking the context or a
                # plain truthy flag; either way the panel must also pass
                # the access check.
                if (callable(panel.nav) and panel.nav(context) and
                        panel.can_access(context)):
                    allowed_panels.append(panel)
                elif (not callable(panel.nav) and panel.nav and
                        panel.can_access(context)):
                    allowed_panels.append(panel)
            if allowed_panels:
                non_empty_groups.append((group.name, allowed_panels))
        # Same callable-or-flag handling for the dashboard itself.
        if (callable(dash.nav) and dash.nav(context) and
                dash.can_access(context)):
            dashboards.append((dash, SortedDict(non_empty_groups)))
        elif (not callable(dash.nav) and dash.nav and
                dash.can_access(context)):
            dashboards.append((dash, SortedDict(non_empty_groups)))
    return {'components': dashboards,
            'user': context['request'].user,
            'current': current_dashboard,
            'current_panel': current_panel.slug if current_panel else '',
            'request': context['request']}
@register.inclusion_tag('horizon/_nav_list.html', takes_context=True)
def horizon_main_nav(context):
    """Generates top-level dashboard navigation entries."""
    if 'request' not in context:
        return {}
    request = context['request']
    current_dashboard = request.horizon.get('dashboard', None)
    dashboards = []
    for dash in Horizon.get_dashboards():
        if not dash.can_access(context):
            continue
        nav = dash.nav
        # nav may be a callable taking the context or a plain truthy flag.
        if callable(nav) and nav(context):
            dashboards.append(dash)
        elif nav:
            dashboards.append(dash)
    return {'components': dashboards,
            'user': request.user,
            'current': current_dashboard,
            'request': request}
@register.inclusion_tag('horizon/_subnav_list.html', takes_context=True)
def horizon_dashboard_nav(context):
    """Generates sub-navigation entries for the current dashboard."""
    if 'request' not in context:
        return {}
    dashboard = context['request'].horizon['dashboard']
    panel_groups = dashboard.get_panel_groups()
    non_empty_groups = []
    for group in panel_groups.values():
        allowed_panels = []
        for panel in group:
            # panel.nav may be a callable taking the context or a plain
            # truthy flag; the panel must also pass the access check.
            if (callable(panel.nav) and panel.nav(context) and
                    panel.can_access(context)):
                allowed_panels.append(panel)
            elif (not callable(panel.nav) and panel.nav and
                    panel.can_access(context)):
                allowed_panels.append(panel)
        if allowed_panels:
            # An unnamed group is displayed under the dashboard's own name.
            if group.name is None:
                non_empty_groups.append((dashboard.name, allowed_panels))
            else:
                non_empty_groups.append((group.name, allowed_panels))
    return {'components': SortedDict(non_empty_groups),
            'user': context['request'].user,
            'current': context['request'].horizon['panel'].slug,
            'request': context['request']}
@register.filter
def quota(val, units=None):
    """Render a quota as '<val> [units] Available', or 'No Limit' for an
    infinite quota.
    """
    if val == float("inf"):
        return _("No Limit")
    available = force_text(_("Available"))
    if units is None:
        return "%s %s" % (val, available)
    return "%s %s %s" % (val, force_text(units), available)
@register.filter
def quotainf(val, units=None):
    """Render a quota value (optionally with units); infinite quotas are
    shown as 'No Limit'.
    """
    if val == float("inf"):
        return _("No Limit")
    if units is not None:
        return "%s %s" % (val, units)
    return val
class JSTemplateNode(template.Node):
    """Helper node for the ``jstemplate`` template tag.

    Renders its children, then converts the square-bracket placeholder
    syntax back into the curly-brace markup used by Mustache-style
    client-side template engines.
    """

    # Square-bracket tokens and their curly replacements, in application
    # order: triple braces must be substituted before double braces.
    _SUBSTITUTIONS = (
        ('[[[', '{{{'), (']]]', '}}}'),
        ('[[', '{{'), (']]', '}}'),
        ('[%', '{%'), ('%]', '%}'),
    )

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        rendered = self.nodelist.render(context)
        for token, replacement in self._SUBSTITUTIONS:
            rendered = rendered.replace(token, replacement)
        return rendered
@register.tag
def jstemplate(parser, token):
    """Replaces ``[[[``/``]]]``, ``[[``/``]]`` and ``[%``/``%]`` with their
    curly-brace counterparts so Mustache-style client-side templates can be
    embedded without conflicting with Django's template engine.
    """
    contents = parser.parse(('endjstemplate',))
    parser.delete_first_token()
    return JSTemplateNode(contents)
@register.assignment_tag
def load_config():
    """Expose the HORIZON_CONFIG settings dict to templates."""
    return conf.HORIZON_CONFIG
@register.assignment_tag
def datepicker_locale():
    """Return the bootstrap-datepicker locale code for the active
    language, falling back to 'en' when no mapping exists.
    """
    mapping = getattr(settings, 'DATEPICKER_LOCALES',
                      bootstrap_datepicker.LOCALE_MAPPING)
    language = translation.get_language()
    return mapping.get(language, 'en')
#!/usr/bin/python
# this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617
# this program will determine the overall range of signal strengths received during the whole session.
# this program can be run standalone but is usually run at end of session by doscanw.py
# Its output will be stored in 2 files:
# dbminmax.txt and session-overview.png . The first contains two rows of text with just the maximum
# and minimum of the whole session. The second contains a chart of all the min and max values for each of
# the scan files
from glob import glob
import numpy as np
import radioConfig
import subprocess
import os
import datetime
import sys
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def outmsg(smsg):
    """Print *smsg* prefixed with this program's name."""
    print("..findsessionrangew.py: " + smsg)
def strinsert(source_str, insert_str, pos):
    """Return *source_str* with *insert_str* spliced in before index *pos*."""
    head, tail = source_str[:pos], source_str[pos:]
    return head + insert_str + tail
# Main script: scan every *.bin power file in the session folder, track the
# per-file and overall min/max signal levels, plot them, and write the
# overall range to dbminmax.txt.
outmsg("Finding session range...")
# Sentinel extremes so any real dB reading replaces them on first file.
globmax = -9000
globmin = 9000
sessmin = np.empty(shape=[0, 1])
sessmax = np.empty(shape=[0, 1])
scantimeline = np.empty(shape=[0, 1])
# Session folder is the single required command-line argument.
sessionfolder = sys.argv[1]
overviewname = sessionfolder + os.sep + 'session-overview.png'
minmaxname = sessionfolder + os.sep + 'dbminmax.txt'
binpattern = sessionfolder + os.sep + '*.bin'
files_in_dir = sorted(glob(binpattern))
for fname in files_in_dir:
    outmsg(fname)
    # Each .bin file is a raw dump of float32 dB readings.
    dbs = np.fromfile(fname, dtype='float32')
    thismin=dbs.min()
    thismax=dbs.max()
    # NOTE(review): date/time are taken from fixed character positions in
    # the full path, so this assumes a specific sessionfolder length and
    # filename layout (YYYYMMDDHHMMSS-style) — TODO confirm; using
    # os.path.basename would be more robust.
    scantime=str(fname)[20:26]
    scandate=str(fname)[12:20]
    if thismin < globmin:
        globmin = thismin
    if thismax > globmax:
        globmax = thismax
    sessmin = np.append(sessmin, thismin)
    sessmax = np.append(sessmax, thismax)
    # Turn HHMMSS into "DD HH:MM:SS" tick labels.
    scantime = strinsert(scantime, ":", 2)
    scantime = strinsert(scantime, ":", 5)
    scantime = scandate[-2:] + " " + scantime
    msg = "%s %s %f %f" % (scandate,scantime,thismin,thismax)
    outmsg(msg)
    scantimeline = np.append(scantimeline, scantime)
mytitle = 'This session signal range: min %.2f .. max %.2f' % (globmin,globmax)
outmsg(mytitle)
# this red plot will help us in finding the scan with highest power range
# (when using the gainloop.py program it will be useful to find the best gain values)
# adding globmin value just to offset the red plot to the middle of the chart
sessdiff = ( sessmax - sessmin ) + globmin
xs = range(len(scantimeline))
plt.figure(figsize=(12, 9), dpi=600)
plt.xlabel('Scan time (UTC)', fontsize=8)
plt.ylabel('Signal power', fontsize=8)
plt.tick_params(labelsize=8)
plt.plot(xs, sessmax, linestyle='--', marker='o' )
plt.plot(xs, sessmin, linestyle='--', marker='o' )
#plt.plot(xs,sessdiff )
plt.xticks(xs,scantimeline,rotation=70,fontsize=8)
# Annotate each data point with its dB value; minima labels above the
# marker, maxima labels below, to keep them from overlapping.
for i,j in zip(xs,sessmin):
    tann = '%.1f' % j
    plt.annotate( tann, xy=(i,j), xytext=(0,15), textcoords='offset points', fontsize=8 )
for i,j in zip(xs,sessmax):
    tann = '%.1f' % j
    plt.annotate( tann, xy=(i,j), xytext=(0,-20), textcoords='offset points', fontsize=8 )
plt.grid()
#leg = plt.legend( ('maxima','minima','difference'), loc='upper right' )
leg = plt.legend( ('maxima','minima'), loc='upper right' )
leg.get_frame().set_alpha(0.5)
plt.title(mytitle)
#plt.show()
plt.tight_layout()
plt.savefig(overviewname)
# Persist the session-wide range: first line max, second line min.
sessfile = open(minmaxname, "w")
sessfile.write(str(globmax))
sessfile.write("\n")
sessfile.write(str(globmin))
sessfile.write("\n")
sessfile.close()
outmsg("Session signal range chart saved")
| mariocannistra/radio-astronomy-fftw | findsessionrangew.py | Python | mit | 3,584 |
# Copyright 2018 Camptocamp SA
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2016 ForgeFlow S.L. (https://www.forgeflow.com)
# Copyright 2016 Therp BV <http://therp.nl>
# Copyright 2019 JARSA Sistemas S.A. de C.V.
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo.tests.common import SavepointCase
class TestStockLogisticsWarehouse(SavepointCase):
    """Tests for the unreserved available quantity field
    ``qty_available_not_res`` on products and templates, including its
    search implementation.
    """

    @classmethod
    def setUpClass(cls):
        """Create the locations, products and pickings shared by all tests."""
        super().setUpClass()
        cls.pickingObj = cls.env['stock.picking']
        cls.productObj = cls.env['product.product']
        cls.templateObj = cls.env['product.template']
        cls.supplier_location = cls.env.ref('stock.stock_location_suppliers')
        cls.stock_location = cls.env.ref('stock.stock_location_stock')
        cls.customer_location = cls.env.ref('stock.stock_location_customers')
        cls.uom_unit = cls.env.ref('uom.product_uom_unit')
        cls.main_company = cls.env.ref('base.main_company')
        # Two internal sub-locations of WH/Stock, used to spread quants
        # over several locations in test_more_than_one_quant.
        cls.bin_a = cls.env['stock.location'].create({
            'usage': 'internal',
            'name': 'Bin A',
            'location_id': cls.stock_location.id,
            'company_id': cls.main_company.id
        })
        cls.bin_b = cls.env['stock.location'].create({
            'usage': 'internal',
            'name': 'Bin B',
            'location_id': cls.stock_location.id,
            'company_id': cls.main_company.id
        })
        cls.env['stock.location']._parent_store_compute()
        # Create product template
        cls.templateAB = cls.templateObj.create({
            'name': 'templAB',
            'uom_id': cls.uom_unit.id,
        })
        # The template's automatically created variant; A and B are added
        # below as extra variants of the same template.
        cls.productC = cls.templateAB.product_variant_ids
        # Create product A and B
        cls.productA = cls.productObj.create({
            'name': 'product A',
            'standard_price': 1,
            'type': 'product',
            'uom_id': cls.uom_unit.id,
            'default_code': 'A',
            'product_tmpl_id': cls.templateAB.id,
        })
        cls.productB = cls.productObj.create({
            'name': 'product B',
            'standard_price': 1,
            'type': 'product',
            'uom_id': cls.uom_unit.id,
            'default_code': 'B',
            'product_tmpl_id': cls.templateAB.id,
        })
        # Create a picking move from INCOMING to STOCK
        cls.pickingInA = cls.pickingObj.create({
            'picking_type_id': cls.env.ref('stock.picking_type_in').id,
            'location_id': cls.supplier_location.id,
            'location_dest_id': cls.stock_location.id,
            'move_lines': [
                (0, 0, {
                    'name': 'Test move',
                    'product_id': cls.productA.id,
                    'product_uom': cls.productA.uom_id.id,
                    'product_uom_qty': 2,
                    'quantity_done': 2,
                    'location_id': cls.supplier_location.id,
                    'location_dest_id': cls.stock_location.id,
                })
            ]
        })
        cls.pickingInB = cls.pickingObj.create({
            'picking_type_id': cls.env.ref('stock.picking_type_in').id,
            'location_id': cls.supplier_location.id,
            'location_dest_id': cls.stock_location.id,
            'move_lines': [
                (0, 0, {
                    'name': 'Test move',
                    'product_id': cls.productB.id,
                    'product_uom': cls.productB.uom_id.id,
                    'product_uom_qty': 3,
                    'quantity_done': 3,
                    'location_id': cls.supplier_location.id,
                    'location_dest_id': cls.stock_location.id,
                })
            ]
        })
        # Outgoing picking for product B; note no quantity_done, so it
        # only reserves stock until it is validated.
        cls.pickingOutA = cls.pickingObj.create({
            'picking_type_id': cls.env.ref('stock.picking_type_out').id,
            'location_id': cls.stock_location.id,
            'location_dest_id': cls.customer_location.id,
            'move_lines': [
                (0, 0, {
                    'name': 'Test move',
                    'product_id': cls.productB.id,
                    'product_uom': cls.productB.uom_id.id,
                    'product_uom_qty': 2,
                    'location_id': cls.stock_location.id,
                    'location_dest_id': cls.customer_location.id,
                })
            ]
        })

    def compare_qty_available_not_res(self, product, value):
        """Assert that *product* (product or template record) reports
        exactly *value* as its unreserved available quantity.

        The cache is invalidated first so the computed field is re-read.
        """
        product.invalidate_cache()
        self.assertEqual(product.qty_available_not_res, value)

    def test_stock_levels(self):
        """checking that qty_available_not_res actually reflects \
        the variations in stock, both on product and template"""
        # Nothing received yet: everything is at zero.
        self.compare_qty_available_not_res(self.productA, 0)
        self.compare_qty_available_not_res(self.templateAB, 0)
        # Confirming / reserving an incoming picking changes nothing:
        # the goods are not in stock until validation.
        self.pickingInA.action_confirm()
        self.compare_qty_available_not_res(self.productA, 0)
        self.compare_qty_available_not_res(self.templateAB, 0)
        self.pickingInA.action_assign()
        self.compare_qty_available_not_res(self.productA, 0)
        self.compare_qty_available_not_res(self.templateAB, 0)
        self.pickingInA.button_validate()
        self.compare_qty_available_not_res(self.productA, 2)
        self.compare_qty_available_not_res(self.templateAB, 2)
        # will directly trigger action_done on self.productB
        self.pickingInB.action_done()
        self.compare_qty_available_not_res(self.productA, 2)
        self.compare_qty_available_not_res(self.productB, 3)
        self.compare_qty_available_not_res(self.templateAB, 5)
        self.compare_qty_available_not_res(self.productB, 3)
        self.compare_qty_available_not_res(self.templateAB, 5)
        self.pickingOutA.action_confirm()
        self.compare_qty_available_not_res(self.productB, 3)
        self.compare_qty_available_not_res(self.templateAB, 5)
        # Reserving the outgoing picking lowers the unreserved quantity
        # even though physical stock is unchanged.
        self.pickingOutA.action_assign()
        self.compare_qty_available_not_res(self.productB, 1)
        self.compare_qty_available_not_res(self.templateAB, 3)
        self.pickingOutA.action_done()
        self.compare_qty_available_not_res(self.productB, 1)
        self.compare_qty_available_not_res(self.templateAB, 3)
        # Smoke-test the action opening the unreserved quants view.
        self.templateAB.action_open_quants_unreserved()

    def test_more_than_one_quant(self):
        """Quantities from quants in several internal locations add up."""
        self.env['stock.quant'].create(
            {'location_id': self.stock_location.id,
             'company_id': self.main_company.id,
             'product_id': self.productA.id,
             'quantity': 10.0})
        self.env['stock.quant'].create(
            {'location_id': self.bin_a.id,
             'company_id': self.main_company.id,
             'product_id': self.productA.id,
             'quantity': 10.0})
        self.env['stock.quant'].create(
            {'location_id': self.bin_b.id,
             'company_id': self.main_company.id,
             'product_id': self.productA.id,
             'quantity': 60.0})
        self.compare_qty_available_not_res(self.productA, 80)

    def check_variants_found_correctly(self, operator, value, expected):
        """Search variants of templateAB by unreserved qty; fail unless
        exactly *expected* is found."""
        domain = [('id', 'in', self.templateAB.product_variant_ids.ids)]
        return self.check_found_correctly(self.env['product.product'],
                                          domain, operator, value, expected)

    def check_template_found_correctly(self, operator, value, expected):
        """Search templateAB by unreserved qty; fail unless exactly
        *expected* is found."""
        # There may be other products already in the system: ignore those
        domain = [('id', 'in', self.templateAB.ids)]
        return self.check_found_correctly(self.env['product.template'],
                                          domain, operator, value, expected)

    def check_found_correctly(self, model, domain, operator, value, expected):
        """Run a ``qty_available_not_res`` search on *model* restricted by
        *domain* and fail with a descriptive message when the result
        differs from *expected*."""
        found = model.search(domain + [
            ('qty_available_not_res', operator, value)]
        )
        if found != expected:
            self.fail(
                "Searching for products failed: search for unreserved "
                "quantity {operator} {value}; expected to find "
                "{expected}, but found {found}".format(
                    operator=operator,
                    value=value,
                    expected=expected or "no products",
                    found=found,
                )
            )

    def test_stock_search(self):
        """Exercise every comparison operator of the search implementation
        at each stage of the receipt workflow."""
        all_variants = self.templateAB.product_variant_ids
        a_and_b = self.productA + self.productB
        b_and_c = self.productB + self.productC
        a_and_c = self.productA + self.productC
        no_variants = self.env['product.product']
        no_template = self.env['product.template']
        # Start: one template with three variants.
        # All variants have zero unreserved stock
        self.check_variants_found_correctly('=', 0, all_variants)
        self.check_variants_found_correctly('>=', 0, all_variants)
        self.check_variants_found_correctly('<=', 0, all_variants)
        self.check_variants_found_correctly('>', 0, no_variants)
        self.check_variants_found_correctly('<', 0, no_variants)
        self.check_variants_found_correctly('!=', 0, no_variants)
        self.check_template_found_correctly('=', 0, self.templateAB)
        self.check_template_found_correctly('>=', 0, self.templateAB)
        self.check_template_found_correctly('<=', 0, self.templateAB)
        self.check_template_found_correctly('>', 0, no_template)
        self.check_template_found_correctly('<', 0, no_template)
        self.check_template_found_correctly('!=', 0, no_template)
        self.pickingInA.action_confirm()
        # All variants still have zero unreserved stock
        self.check_variants_found_correctly('=', 0, all_variants)
        self.check_variants_found_correctly('>=', 0, all_variants)
        self.check_variants_found_correctly('<=', 0, all_variants)
        self.check_variants_found_correctly('>', 0, no_variants)
        self.check_variants_found_correctly('<', 0, no_variants)
        self.check_variants_found_correctly('!=', 0, no_variants)
        self.check_template_found_correctly('=', 0, self.templateAB)
        self.check_template_found_correctly('>=', 0, self.templateAB)
        self.check_template_found_correctly('<=', 0, self.templateAB)
        self.check_template_found_correctly('>', 0, no_template)
        self.check_template_found_correctly('<', 0, no_template)
        self.check_template_found_correctly('!=', 0, no_template)
        self.pickingInA.action_assign()
        # All variants still have zero unreserved stock
        self.check_variants_found_correctly('=', 0, all_variants)
        self.check_variants_found_correctly('>=', 0, all_variants)
        self.check_variants_found_correctly('<=', 0, all_variants)
        self.check_variants_found_correctly('>', 0, no_variants)
        self.check_variants_found_correctly('<', 0, no_variants)
        self.check_variants_found_correctly('!=', 0, no_variants)
        self.check_template_found_correctly('=', 0, self.templateAB)
        self.check_template_found_correctly('>=', 0, self.templateAB)
        self.check_template_found_correctly('<=', 0, self.templateAB)
        self.check_template_found_correctly('>', 0, no_template)
        self.check_template_found_correctly('<', 0, no_template)
        self.check_template_found_correctly('!=', 0, no_template)
        self.pickingInA.button_validate()
        # product A has 2 unreserved stock, other variants have 0
        self.check_variants_found_correctly('=', 2, self.productA)
        self.check_variants_found_correctly('=', 0, b_and_c)
        self.check_variants_found_correctly('>', 0, self.productA)
        self.check_variants_found_correctly('<', 0, no_variants)
        self.check_variants_found_correctly('!=', 0, self.productA)
        self.check_variants_found_correctly('!=', 1, all_variants)
        self.check_variants_found_correctly('!=', 2, b_and_c)
        self.check_variants_found_correctly('<=', 0, b_and_c)
        self.check_variants_found_correctly('<=', 1, b_and_c)
        self.check_variants_found_correctly('>=', 0, all_variants)
        self.check_variants_found_correctly('>=', 1, self.productA)
        self.check_template_found_correctly('=', 0, self.templateAB)
        self.check_template_found_correctly('=', 1, no_template)
        self.check_template_found_correctly('=', 2, self.templateAB)
        self.check_template_found_correctly('!=', 0, self.templateAB)
        self.check_template_found_correctly('!=', 1, self.templateAB)
        self.check_template_found_correctly('!=', 2, self.templateAB)
        self.check_template_found_correctly('>', -1, self.templateAB)
        self.check_template_found_correctly('>', 0, self.templateAB)
        self.check_template_found_correctly('>', 1, self.templateAB)
        self.check_template_found_correctly('>', 2, no_template)
        self.check_template_found_correctly('<', 3, self.templateAB)
        self.check_template_found_correctly('<', 2, self.templateAB)
        self.check_template_found_correctly('<', 1, self.templateAB)
        self.check_template_found_correctly('<', 0, no_template)
        self.check_template_found_correctly('>=', 0, self.templateAB)
        self.check_template_found_correctly('>=', 1, self.templateAB)
        self.check_template_found_correctly('>=', 2, self.templateAB)
        self.check_template_found_correctly('>=', 3, no_template)
        self.check_template_found_correctly('<=', 3, self.templateAB)
        self.check_template_found_correctly('<=', 2, self.templateAB)
        self.check_template_found_correctly('<=', 1, self.templateAB)
        self.check_template_found_correctly('<=', 0, self.templateAB)
        self.check_template_found_correctly('<=', -1, no_template)
        self.pickingInB.action_done()
        # product A has 2 unreserved, product B has 3 unreserved and
        # the remaining variant has 0
        self.check_variants_found_correctly('=', 2, self.productA)
        self.check_variants_found_correctly('=', 3, self.productB)
        self.check_variants_found_correctly('=', 0, self.productC)
        self.check_variants_found_correctly('>', 0, a_and_b)
        self.check_variants_found_correctly('<', 0, no_variants)
        self.check_variants_found_correctly('!=', 0, a_and_b)
        self.check_variants_found_correctly('!=', 1, all_variants)
        self.check_variants_found_correctly('!=', 2, b_and_c)
        self.check_variants_found_correctly('!=', 3, a_and_c)
        self.check_variants_found_correctly('<=', 0, self.productC)
        self.check_variants_found_correctly('<=', 1, self.productC)
        self.check_variants_found_correctly('>=', 0, all_variants)
        self.check_variants_found_correctly('>=', 1, a_and_b)
        self.check_variants_found_correctly('>=', 2, a_and_b)
        self.check_variants_found_correctly('>=', 3, self.productB)
        self.check_variants_found_correctly('>=', 4, no_variants)
        self.check_template_found_correctly('=', 0, self.templateAB)
        self.check_template_found_correctly('=', 1, no_template)
        self.check_template_found_correctly('=', 2, self.templateAB)
        self.check_template_found_correctly('=', 3, self.templateAB)
        self.check_template_found_correctly('!=', 0, self.templateAB)
        self.check_template_found_correctly('!=', 2, self.templateAB)
        self.check_template_found_correctly('!=', 3, self.templateAB)
        self.check_template_found_correctly('>', 1, self.templateAB)
        self.check_template_found_correctly('>', 2, self.templateAB)
        # This part may seem a bit unintuitive, but this is the
        # way it works in the Odoo core
        # Searches are "deferred" to the variants, so while the template says
        # it has a stock of 5, searching for a stock greater than 3 will not
        # find anything because no singular variant has a higher stock
        self.check_template_found_correctly('>', 3, no_template)
        self.check_template_found_correctly('<', 3, self.templateAB)
        self.check_template_found_correctly('<', 2, self.templateAB)
        self.check_template_found_correctly('<', 1, self.templateAB)
        self.check_template_found_correctly('<', 0, no_template)
| Vauxoo/stock-logistics-warehouse | stock_available_unreserved/tests/test_stock_available_unreserved.py | Python | agpl-3.0 | 16,208 |
from __future__ import unicode_literals
from .. import unittest
from compose.service import Service
from compose.project import Project
from compose.container import Container
from compose import config
import mock
import docker
class ProjectTest(unittest.TestCase):
    """Unit tests for ``compose.project.Project`` construction, service
    lookup/ordering, and resolution of ``volumes_from`` / ``net``
    references (to containers and to sibling services)."""

    def test_from_dict(self):
        """Building from plain dicts exposes each service by name."""
        project = Project.from_dicts('composetest', [
            {
                'name': 'web',
                'image': 'busybox:latest'
            },
            {
                'name': 'db',
                'image': 'busybox:latest'
            },
        ], None)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')

    def test_from_dict_sorts_in_dependency_order(self):
        """Services are ordered dependencies-first (links, volumes_from)."""
        project = Project.from_dicts('composetest', [
            {
                'name': 'web',
                'image': 'busybox:latest',
                'links': ['db'],
            },
            {
                'name': 'db',
                'image': 'busybox:latest',
                'volumes_from': ['volume']
            },
            {
                'name': 'volume',
                'image': 'busybox:latest',
                'volumes': ['/tmp'],
            }
        ], None)
        # volume <- db <- web, so volume must come first.
        self.assertEqual(project.services[0].name, 'volume')
        self.assertEqual(project.services[1].name, 'db')
        self.assertEqual(project.services[2].name, 'web')

    def test_from_config(self):
        """Building from a parsed config behaves like from plain dicts."""
        dicts = config.from_dictionary({
            'web': {
                'image': 'busybox:latest',
            },
            'db': {
                'image': 'busybox:latest',
            },
        })
        project = Project.from_dicts('composetest', dicts, None)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')

    def test_get_service(self):
        """get_service returns the exact Service instance by name."""
        web = Service(
            project='composetest',
            name='web',
            client=None,
            image="busybox:latest",
        )
        project = Project('test', [web], None)
        self.assertEqual(project.get_service('web'), web)

    def test_get_services_returns_all_services_without_args(self):
        web = Service(
            project='composetest',
            name='web',
        )
        console = Service(
            project='composetest',
            name='console',
        )
        project = Project('test', [web, console], None)
        self.assertEqual(project.get_services(), [web, console])

    def test_get_services_returns_listed_services_with_args(self):
        web = Service(
            project='composetest',
            name='web',
        )
        console = Service(
            project='composetest',
            name='console',
        )
        project = Project('test', [web, console], None)
        self.assertEqual(project.get_services(['console']), [console])

    def test_get_services_with_include_links(self):
        """include_deps=True pulls in linked services, dependencies first,
        but not unrelated ones (cache)."""
        db = Service(
            project='composetest',
            name='db',
        )
        web = Service(
            project='composetest',
            name='web',
            links=[(db, 'database')]
        )
        cache = Service(
            project='composetest',
            name='cache'
        )
        console = Service(
            project='composetest',
            name='console',
            links=[(web, 'web')]
        )
        project = Project('test', [web, db, cache, console], None)
        self.assertEqual(
            project.get_services(['console'], include_deps=True),
            [db, web, console]
        )

    def test_get_services_removes_duplicates_following_links(self):
        """A service reached both directly and via a link appears once."""
        db = Service(
            project='composetest',
            name='db',
        )
        web = Service(
            project='composetest',
            name='web',
            links=[(db, 'database')]
        )
        project = Project('test', [web, db], None)
        self.assertEqual(
            project.get_services(['web', 'db'], include_deps=True),
            [db, web]
        )

    def test_use_volumes_from_container(self):
        """volumes_from naming an existing container resolves to its id."""
        container_id = 'aabbccddee'
        container_dict = dict(Name='aaa', Id=container_id)
        mock_client = mock.create_autospec(docker.Client)
        mock_client.inspect_container.return_value = container_dict
        project = Project.from_dicts('test', [
            {
                'name': 'test',
                'image': 'busybox:latest',
                'volumes_from': ['aaa']
            }
        ], mock_client)
        self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id])

    def test_use_volumes_from_service_no_container(self):
        """volumes_from naming a service without running containers falls
        back to the container listed by the Docker client."""
        container_name = 'test_vol_1'
        mock_client = mock.create_autospec(docker.Client)
        mock_client.containers.return_value = [
            {
                "Name": container_name,
                "Names": [container_name],
                "Id": container_name,
                "Image": 'busybox:latest'
            }
        ]
        project = Project.from_dicts('test', [
            {
                'name': 'vol',
                'image': 'busybox:latest'
            },
            {
                'name': 'test',
                'image': 'busybox:latest',
                'volumes_from': ['vol']
            }
        ], mock_client)
        self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name])

    @mock.patch.object(Service, 'containers')
    def test_use_volumes_from_service_container(self, mock_return):
        """volumes_from naming a service with running containers resolves
        to all of that service's container ids."""
        container_ids = ['aabbccddee', '12345']
        mock_return.return_value = [
            mock.Mock(id=container_id, spec=Container)
            for container_id in container_ids]
        project = Project.from_dicts('test', [
            {
                'name': 'vol',
                'image': 'busybox:latest'
            },
            {
                'name': 'test',
                'image': 'busybox:latest',
                'volumes_from': ['vol']
            }
        ], None)
        self.assertEqual(project.get_service('test')._get_volumes_from(), container_ids)

    def test_use_net_from_container(self):
        """net: container:<name> resolves to container:<id>."""
        container_id = 'aabbccddee'
        container_dict = dict(Name='aaa', Id=container_id)
        mock_client = mock.create_autospec(docker.Client)
        mock_client.inspect_container.return_value = container_dict
        project = Project.from_dicts('test', [
            {
                'name': 'test',
                'image': 'busybox:latest',
                'net': 'container:aaa'
            }
        ], mock_client)
        service = project.get_service('test')
        self.assertEqual(service._get_net(), 'container:'+container_id)

    def test_use_net_from_service(self):
        """net: container:<service> resolves via the service's container."""
        container_name = 'test_aaa_1'
        mock_client = mock.create_autospec(docker.Client)
        mock_client.containers.return_value = [
            {
                "Name": container_name,
                "Names": [container_name],
                "Id": container_name,
                "Image": 'busybox:latest'
            }
        ]
        project = Project.from_dicts('test', [
            {
                'name': 'aaa',
                'image': 'busybox:latest'
            },
            {
                'name': 'test',
                'image': 'busybox:latest',
                'net': 'container:aaa'
            }
        ], mock_client)
        service = project.get_service('test')
        self.assertEqual(service._get_net(), 'container:'+container_name)
| hypriot/compose | tests/unit/project_test.py | Python | apache-2.0 | 8,045 |
import os
import bpy
import zipfile
import re
import xml.etree.ElementTree as ET
import base64
from .shading import *
from ..utils import *
def get_mecafig(context):
    """Return the MecaFig armature for the active object, or ``None``.

    Accepts either the armature itself or one of its MecaFig part meshes;
    in the latter case the mesh's armature parent is returned.  An object
    only qualifies when its ``mecafig.name`` is non-empty (i.e. it belongs
    to a named MecaFig).
    """
    ob = context.active_object
    if ob is None:
        return None
    if ob.type == 'ARMATURE':
        # The armature itself was selected.
        if ob.mecafig.name != '':
            return ob
    elif ob.type == 'MESH':
        # A part mesh was selected: climb to its armature parent.
        if ob.mecafig.geometry.name in MECAFIG:
            parent = ob.parent
            if (parent is not None
                    and parent.type == 'ARMATURE'
                    and parent.mecafig.name != ''):
                return parent
    return None
def set_scene_properties(context):
    """Ensure the scene has one shading-panel entry per SHADING element.

    Missing entries are added; the 'Base' panel is shown by default.
    """
    panels = context.scene.mecafig.shading.panels
    for panel_name in SHADING:
        if panel_name in panels.keys():
            continue
        entry = panels.add()
        entry.name = panel_name
        if panel_name == 'Base':
            panels[panel_name].show_panel = True
    return {'FINISHED'}
def set_armature_properties(object):
    """Ensure an armature's parts collection has one entry per MecaFig part.

    Does nothing for non-armature objects.
    """
    if object.type == 'ARMATURE':
        parts = object.mecafig.armature.parts
        for part_name in MECAFIG:
            if part_name not in parts.keys():
                entry = parts.add()
                entry.name = part_name
    return {'FINISHED'}
def select_mecafig(context, name):
    """Select (and make active) the MecaFig armature called *name*.

    Temporarily leaves pose/edit mode, deselects everything, selects the
    matching armature, restores pose mode if it was active, then frames
    the selection in every non-camera 3D viewport.  Returns the armature
    object, or None when no MecaFig with that name exists.
    """
    ob = context.active_object
    mecafig = None
    pose_flag = False
    if ob:
        # Mode toggles operate on the active object; remember pose mode so
        # it can be restored after the selection change.
        if ob.mode == 'POSE':
            pose_flag = True
            bpy.ops.object.posemode_toggle()
        elif ob.mode == 'EDIT':
            bpy.ops.object.editmode_toggle()
    bpy.ops.object.select_all(action='DESELECT')
    # NOTE: loop variable shadows the `object` builtin (kept as-is).
    for object in bpy.data.objects:
        if object.type == 'ARMATURE':
            if object.mecafig.name == name:
                object.select_set(True)
                context.view_layer.objects.active = object
                mecafig = object
    if pose_flag:
        bpy.ops.object.posemode_toggle()
    # Focus on selected MecaFig
    for area in context.screen.areas:
        if area.type == 'VIEW_3D' and area.spaces[0].region_3d.view_perspective != 'CAMERA':
            # Run the view operator with a context override for this area.
            ctx = context.copy()
            ctx['area'] = area
            ctx['region'] = area.regions[-1]
            bpy.ops.view3d.view_selected(ctx)
    return mecafig
def set_mecafig_name(context, name):
    """Rename the active MecaFig to *name*, keeping names unique.

    If *name* collides with another MecaFig in the scene, a numeric
    suffix (``_2``, ``_3``, ...) is appended.  The armature object, its
    rig data, its part meshes, their materials and the owning collection
    are all renamed consistently.  Returns the final name actually used.
    """
    ob = get_mecafig(context)
    ob_name = ob.mecafig.name
    # Get MecaFigs name in the Scene
    mf_names = []
    for obj in bpy.data.objects:
        if obj.type == 'ARMATURE':
            if not obj.mecafig.name == '':
                mf_name = obj.mecafig.name
                if mf_name not in mf_names:
                    mf_names.append(mf_name)
    # Check name
    if not name == '':
        if name == ob_name:
            # Renaming to the current name is a no-op.
            new_name = ob_name
        elif name in mf_names:
            # Collision: find the first free "<name>_<i>" suffix.
            i = 2
            new_name = '%s_%s' %(name, i)
            while new_name in mf_names:
                i += 1
                new_name = '%s_%s' %(name, i)
        else:
            new_name = name
    else:
        # Empty input keeps the current name.
        new_name = ob_name
    # Rename
    ob.name = new_name
    ob.data.name = '%s_Rig' % new_name
    ob.mecafig.name = new_name
    for child in ob.children:
        for part in MECAFIG:
            if child.mecafig.geometry.name == part:
                # Child objects (and their materials) are named "<fig>_<part>".
                ch_name = '%s_%s' %(new_name, part)
                child.name = ch_name
                child.mecafig.name = new_name
                if child.active_material:
                    mat = child.active_material
                    mat.name = ch_name
                    mat.mecafig.name = new_name
    if ob.users_collection:
        col = ob.users_collection[0]
        col.name = new_name
    return new_name
def add_mecafig(context):
    """Append a new MecaFig collection from the bundled .blend file.

    Imports the rig collection, de-duplicates part meshes against already
    loaded ones, assigns a copy of the shared MecaFig material with the
    proper UV/normal/base maps per part, scales the rig, gives it a
    unique "MecaFig_<i>" name and selects it.  Returns the armature.
    """
    blend_dir = ADDON_DIR + '/files/mecafig.blend'
    ob = context.active_object
    # Set 3D View Mode to 'OBJECT'
    if ob:
        if ob.mode == 'POSE':
            bpy.ops.object.posemode_toggle()
        elif ob.mode == 'EDIT':
            bpy.ops.object.editmode_toggle()
    # Get MecaFigs name in the Scene
    mf_names = []
    for obj in bpy.data.objects:
        if obj.type == 'ARMATURE':
            if not obj.mecafig.name == '':
                mf_name = obj.mecafig.name
                if mf_name not in mf_names:
                    mf_names.append(mf_name)
    # Get new MecaFig name
    i = 1
    name = 'MecaFig_%s' % i
    while name in mf_names:
        i += 1
        name = 'MecaFig_%s' % i
    # Import MecaFig
    file = COLLECTION
    bpy.ops.wm.append(directory=blend_dir + '/Collection/', filename=file)
    # Set active object
    objects = context.selected_objects
    parent = None
    childs = []
    trash = []
    bpy.ops.object.select_all(action='DESELECT')
    for object in objects:
        if object.type == 'ARMATURE':
            object.select_set(True)
            context.view_layer.objects.active = object
            parent = object
            for child in object.children:
                if child.mecafig.geometry.name in MECAFIG:
                    childs.append(child)
    for child in childs:
        for part in MECAFIG:
            if child.mecafig.geometry.name == part:
                # Mesh
                # Re-point duplicated mesh datablocks (e.g. "Head.001") at
                # the already-loaded original and queue the copy for removal.
                if not child.data.name in MECAFIG[part]['meshes']:
                    ch_data = child.data
                    ch_data_name = ch_data.name.split('.')[0]
                    for mesh in MECAFIG[part]['meshes']:
                        if ch_data_name == mesh:
                            if mesh in bpy.data.meshes.keys():
                                child.data = bpy.data.meshes[mesh]
                                if not ch_data in trash:
                                    trash.append(ch_data)
                # Material
                if not MATERIAL in bpy.data.materials.keys():
                    # Import material
                    dir = blend_dir + '/Material/'
                    bpy.ops.wm.append(directory=dir, filename=MATERIAL)
                    bpy.data.materials[MATERIAL].use_fake_user = True
                # Each part gets its own copy of the shared material.
                child.active_material = bpy.data.materials[MATERIAL].copy()
                mat = child.active_material
                nodes = mat.node_tree.nodes
                # UV Maps
                uv_maps = MECAFIG[part]['uv_maps']
                if not uv_maps == []:
                    nodes['UV Maps'].uv_map = uv_maps[-1]
                # Base Map
                if part in ['Leg.L', 'Leg.R', 'Arm.L', 'Arm.R']:
                    if part.startswith('Leg'):
                        b_part = 'leg'
                    elif part.startswith('Arm'):
                        b_part = 'arm'
                    b_dir = ADDON_DIR + '/files/textures'
                    b_map = get_image(('%s_base.png' % b_part), b_dir)
                    nodes['UV Base'].uv_map = 'Base'
                    set_image(mat, 'Base', b_map, 'Non-Color', False)
                # Normal Map
                n_dir = ADDON_DIR + '/files/textures'
                n_map = '%s_normal.png' % child.data.name.split('.')[0]
                n_map = get_image(n_map, n_dir)
                set_image(mat, 'Normal', n_map, 'Non-Color', True)
                # Set Material Scale
                scale = MECAFIG[part]['uv_scale']
                nodes[NODE].inputs['UV Scale'].default_value = scale
                #set_shading_properties(child, part)
    for mesh in trash:
        bpy.data.meshes.remove(mesh)
    # Scale MecaFig by default to 10
    parent.mecafig.armature.scale = 10
    # Name MecaFig
    context.scene.mecafig.name = name
    # Set scene properties
    set_scene_properties(context)
    return select_mecafig(context, name)
def delete_mecafig(context):
    """Delete the active MecaFig and all of its data.

    Removes the armature object, its child objects, their mesh/lattice
    datablocks (when no longer referenced), their materials, and the
    MecaFig's collection, then clears the selection.  Safe to call when
    the active object is not part of a named MecaFig: nothing is removed.
    """
    ob = context.active_object
    arm_list = []
    mesh_list = []
    lat_list = []
    mat_list = []
    if not ob.type == 'ARMATURE':
        ob = ob.parent
    # Guard: a mesh with no parent leaves ob as None — previously this
    # raised AttributeError on ob.mecafig.
    if ob is not None and not ob.mecafig.name == '':
        arm_list.append(ob.data)
        col = ob.users_collection[0]
        for child in ob.children:
            # Collect datablocks before removing the objects so unused
            # meshes/lattices/materials can be purged afterwards.
            if child.type == 'MESH':
                if child.data not in mesh_list:
                    mesh_list.append(child.data)
            elif child.type == 'LATTICE':
                lat_list.append(child.data)
            if child.active_material:
                mat_list.append(child.active_material)
            bpy.data.objects.remove(child)
        bpy.data.objects.remove(ob)
        bpy.data.collections.remove(col)
        bpy.data.armatures.remove(arm_list[0])
    # Only purge datablocks that no remaining object still uses.
    for mesh in mesh_list:
        if mesh.users == 0:
            bpy.data.meshes.remove(mesh)
    for lat in lat_list:
        if lat.users == 0:
            bpy.data.lattices.remove(lat)
    for mat in mat_list:
        bpy.data.materials.remove(mat)
    bpy.ops.object.select_all(action='DESELECT')
    return {'FINISHED'}
def extract_zipfile(filepath):
    """Extract every ``.dae``/``.mbx`` entry of *filepath* next to it.

    All matching entries are extracted into the archive's directory.
    Returns the name of the last matching entry, or ``''`` when the file
    is not a zip archive or contains no matching entry.
    """
    matched = ''
    wanted = ('.dae', '.mbx')
    if not zipfile.is_zipfile(filepath):
        return matched
    target_dir = os.path.dirname(filepath)
    with zipfile.ZipFile(filepath, 'r') as archive:
        for entry in archive.namelist():
            # str.endswith accepts a tuple: one test for both extensions.
            if entry.endswith(wanted):
                matched = entry
                archive.extract(entry, target_dir)
    return matched
def extract_from_collada(self, context, filepath, collada_file):
    """Collect MecaFig part data from a Collada (.dae) file.

    With ``exp`` True (always, currently) the file is parsed as XML and a
    dict mapping part name -> list of [mesh_name, matrix, material, 1]
    entries is returned.  The ``else`` branch (import via Blender and
    scrape the imported objects) is dead code.
    """
    exp = True
    filepath = os.path.dirname(filepath) + '/' + collada_file
    objects = {}
    if exp:
        tree = ET.parse(filepath) # Parse XML file
        root = tree.getroot() # Get ROOT
        for part in MECAFIG:
            objects[part] = []
            meshes = MECAFIG[part]['meshes']
            for child in root:
                if 'library_visual_scenes' in child.tag:
                    for node in child[0]:
                        matrix = []
                        for elem in node:
                            if 'matrix' in elem.tag:
                                # 16 floats (as strings): the node's 4x4 transform.
                                matrix = elem.text.split()
                            if 'instance_geometry' in elem.tag:
                                mesh_name = elem.attrib['url'].replace('#', '').split('-')[0]
                                for mesh in meshes:
                                    # Accept optional "uv<N>" suffixed variants of the mesh name.
                                    r = r'%s(uv|UV)?\d?' % mesh
                                    if re.fullmatch(r, mesh_name):
                                        mat = elem[0][0][0].attrib['symbol'].split('-')[0].split(':')
                                        mat_list = [[mat[0]], mat[1], mat[3]]
                                        object = [mesh_name, matrix, mat_list, 1]
                                        objects[part].append(object)
    else:
        # NOTE(review): dead branch (exp is hard-coded True). If ever
        # enabled it would raise NameError: `settings` is never defined,
        # and it returns a list while the branch above returns a dict.
        # Import Collada file
        bpy.ops.object.select_all(action = 'DESELECT')
        bpy.ops.wm.collada_import(filepath = filepath)
        # Get objects
        objects = [ob for ob in context.selected_objects]
        for part in MECAFIG:
            for ob in objects:
                mesh_name = ob.data.name.split('.')[0]
                for mesh in MECAFIG[part]['meshes']:
                    r = r'%s(uv|UV)?\d?' % mesh
                    if re.fullmatch(r, mesh_name):
                        mat_name = ob.active_material.name.split('.')[0].split(':')
                        mat_list = [mat_name[0], mat_name[1], mat_name[3]]
                        settings[part] = [mesh_name, mat_list]
        # Remove objects
        mesh_list = []
        mat_list = []
        for ob in objects:
            if ob.data not in mesh_list:
                mesh_list.append(ob.data)
            if ob.active_material not in mat_list:
                mat_list.append(ob.active_material)
            bpy.data.objects.remove(ob)
        for mesh in mesh_list:
            bpy.data.meshes.remove(mesh)
        for mat in mat_list:
            bpy.data.materials.remove(mat)
    return objects
def extract_from_zmbx(self, context, filepath, zmbx_file):
    """Collect MecaFig part data from a Mecabricks .zmbx scene file.

    Handles both the v2.0.0 format (a dict with a 'metadata' key) and the
    legacy v1 format (positional lists). Embedded base64 textures are
    decoded to PNG files in 'maps/' subfolders next to the archive.

    Returns a dict mapping each MECAFIG part name to a list of
    [mesh/uv_name, matrix, mat_list, version] entries.
    """
    ob_dict = {}
    file_path = os.path.dirname(filepath)
    debug = False
    def decode_image(image_string, filepath, image_name):
        # Write a base64-encoded texture string out as '<image_name>.png'.
        data = bytes(image_string, 'utf-8')
        name = image_name + '.png'
        with open(filepath + name, 'wb') as image:
            image.write(base64.decodebytes(data))
        return image
    # Set .zmbx file as a variable
    filepath = os.path.dirname(filepath) + '/' + zmbx_file
    with open(filepath, 'r') as f:
        # NOTE(review): eval() executes arbitrary code if the .zmbx file is
        # untrusted; json.loads would be the safe equivalent here.
        scene = eval(f.read().replace('null', 'None').replace('false', 'False').replace('true', 'True'))
    # ZMBX Version [2.0.0]
    if 'metadata' in scene:
        objects = scene['parts']
        textures = scene['textures']
        for part in MECAFIG:
            if debug:
                print(part)
            i = 0
            ob_dict[part] = []
            for ob in objects:
                config = ob['configuration']
                # Check if this object is a MiniFig part
                for mesh in MECAFIG[part]['meshes']:
                    r = r'%s(uv|UV)?\d?' % mesh # Regex
                    mesh_name = config.split('.')[0]
                    if re.fullmatch(r, mesh_name):
                        version = ob['version']
                        scope = ob['scope']
                        matrix = ob['matrix']
                        material = ob['material']
                        # Texture tables are grouped per format version.
                        images = textures[str(version)]
                        # ### Material ###
                        mat_list = []
                        # Base
                        base = material['base']
                        mat_list.append(base)
                        # ### Maps ###
                        maps = material['decoration']
                        # UV Map
                        if 'uv' in maps:
                            uv = maps['uv']
                            uv_map = '%s%s' %(config, ('uv%s' %('' if uv == 1 else str(uv))))
                        else:
                            uv_map = config.split('.')[0]
                        # Decoration / Color
                        # ('map' shadows the builtin of the same name.)
                        if version == 1:
                            map = 'decoration'
                        else:
                            map = 'color'
                        if map in maps:
                            dec = maps[map]
                            dec_name = dec['name'].split('.')[0]
                            if version == 1:
                                dec_images = images[map]
                                dec_path = file_path + '/maps/diffuse/'
                            else:
                                dec_images = images[scope][map]
                                dec_path = file_path + '/maps/color/'
                            dec_image = dec_images[dec['name']]
                            # Check if path exists
                            if not os.path.exists(dec_path):
                                os.makedirs(dec_path)
                            # Create image
                            decode_image(dec_image, dec_path, dec_name)
                            mat_list.append(dec_name)
                        else:
                            mat_list.append('')
                        # Metalness / Data
                        if version == 1:
                            map = 'metalness'
                        else:
                            map = 'data'
                        if map in maps:
                            met = maps[map]
                            met_name = met['name'].split('.')[0] + '_data'
                            if version == 1:
                                met_images = images[map]
                                met_path = file_path + '/maps/metalness/'
                            else:
                                met_images = images[scope][map]
                                met_path = file_path + '/maps/data/'
                            met_image = met_images[met['name']]
                            # Check if path exists
                            if not os.path.exists(met_path):
                                os.makedirs(met_path)
                            # Create image
                            decode_image(met_image, met_path, met_name)
                            mat_list.append(met_name)
                        else:
                            mat_list.append('')
                        ob_list = [uv_map, matrix, mat_list, version]
                        ob_dict[part].append(ob_list)
                        if debug:
                            i += 1
                            print(i, ob_list)
        return ob_dict
    # ZMBX Version 1
    else:
        # Legacy format: positional lists rather than keyed dicts.
        objects = scene[0]
        materials = scene[2]
        images = scene[3]
        mesh_names = scene[5]
        for part in MECAFIG:
            ob_dict[part] = []
            for ob in objects:
                mesh_id = ob[0]
                mesh_name = mesh_names[mesh_id]
                for mesh in MECAFIG[part]['meshes']:
                    r = r'%s(uv|UV)?\d?' % mesh
                    if re.fullmatch(r, mesh_name):
                        # Matrix
                        matrix = ob[1]
                        # Material
                        mat_id = ob[2][0]
                        mat = materials[mat_id]
                        mat_list = []
                        # Color ID
                        color_id = [str(mat[0])]
                        mat_list.append(color_id)
                        # Decoration Map
                        d_map_id = mat[2]
                        if d_map_id is None:
                            mat_list.append('')
                        else:
                            d_images = images[1]
                            d_map_str = d_images[d_map_id]
                            d_fp = os.path.dirname(filepath) + '/maps/diffuse/'
                            if not os.path.exists(d_fp):
                                os.makedirs(d_fp)
                            d_map_name = '%smb%s' %(mesh, ob[3])
                            decode_image(d_map_str, d_fp, d_map_name)
                            mat_list.append(d_map_name)
                        # Metalness Map
                        m_map_id = mat[4]
                        if m_map_id is None:
                            mat_list.append('')
                        else:
                            m_images = images[2]
                            m_map_str = m_images[m_map_id]
                            m_fp = os.path.dirname(filepath) + '/maps/metalness/'
                            if not os.path.exists(m_fp):
                                os.makedirs(m_fp)
                            m_map_name = '%smb%s_metal' %(mesh, ob[3])
                            decode_image(m_map_str, m_fp, m_map_name)
                            mat_list.append(m_map_name)
                        ob_dict[part].append([mesh_name, matrix, mat_list, 1])
        return ob_dict
def parse_objects(objects):
    """Group loose part entries into per-figure dicts.

    Fixes over the original: renamed locals that shadowed the builtins
    ``bool``, ``min`` and ``max``, and renamed comprehension variables
    that shadowed the enclosing loop index. Behavior is unchanged.

    Args:
        objects: dict mapping part names to lists of
            [mesh_name, matrix, mat_list, version] entries, as produced by
            the extract_from_* helpers. Matched entries are removed from
            the lists (the dict is mutated).

    Returns:
        dict mapping a figure index to a {part_name: entry} dict; parts
        are attached to each 'Hip' entry by comparing transform matrices.
    """
    debug = False
    figs = {}
    # Per-axis tolerance used when comparing matrix translations.
    thd = [1, 2.6, 1]
    # Expected offsets of each part relative to its parent part.
    vecs = {
        'Leg.L': [[4, 11.2, 0]],
        'Leg.R': [[-4, 11.2, 0]],
        'Hip': [[0, 16, 0]],
        'Body': [[0, 16, 0]],
        'Arm.L': [[6.265, 24.99, 0]],
        'Arm.R': [[-6.265, 24.99, 0]],
        'Hand.L': [[8.265, 19.646,10.988], [8.255, 13.345, 5.35]],
        'Hand.R': [[-8.265, 19.646,10.988], [-8.255, 13.345, 5.35]],
        'Head': [[0, 28.8, 0]]
    }
    def compare_matrix(from_matrix, vector, to_matrix, threshold, debug):
        """Return True when to_matrix's translation equals from_matrix's
        translation offset by (rotated) vector, within threshold per axis."""
        f_mx = from_matrix
        t_mx = to_matrix
        tol = threshold
        axes_ok = 0
        # For X, Y & Z
        for axis in range(0, 3):
            row = axis * 4
            # Expected coordinate: translation plus rotated offset.
            lo = hi = float(f_mx[row + 3])
            for k in range(0, 3):
                lo += (float(f_mx[row + k]) * vector[k])
                hi += (float(f_mx[row + k]) * vector[k])
            # Widen the acceptance window by the per-axis tolerance.
            lo = round(lo, 2) - tol[axis]
            hi = round(hi, 2) + tol[axis]
            value = round(float(t_mx[row + 3]), 2)
            if lo <= value <= hi or hi <= value <= lo:
                axes_ok += 1
            # Debug
            if debug:
                print(axis, axes_ok, lo, value, hi)
        # Match only when all three axes agree.
        return axes_ok == 3
    for i, hip in enumerate(objects['Hip']):
        figs[i] = {}
        for part in MECAFIG:
            matched = False
            # Debug
            if debug:
                print(part)
            if part == 'Hip':
                figs[i][part] = hip
                # Debug
                if debug:
                    print(hip)
            else:
                for vec in vecs[part]:
                    # Pick the reference matrix/offset/tolerance based on
                    # the part's parent in the figure hierarchy.
                    if part in ['Arm.L', 'Arm.R', 'Head']:
                        if 'Body' in figs[i]:
                            f_mx = figs[i]['Body'][1]
                            v = [a - b for a, b in zip(vec, vecs['Body'][0])]
                            t = thd
                        else:
                            break
                    elif part == 'Hand.L':
                        if 'Arm.L' in figs[i]:
                            f_mx = figs[i]['Arm.L'][1]
                            v = [a - b for a, b in zip(vec, vecs['Arm.L'][0])]
                            t = [a + b for a, b in zip(thd, [2.1, 4.2, 2.7])]
                        else:
                            break
                    elif part == 'Hand.R':
                        if 'Arm.R' in figs[i]:
                            f_mx = figs[i]['Arm.R'][1]
                            v = [a - b for a, b in zip(vec, vecs['Arm.R'][0])]
                            t = [a + b for a, b in zip(thd, [2.1, 4.2, 2.7])]
                        else:
                            break
                    else:
                        f_mx = hip[1]
                        v = [a - b for a, b in zip(vec, vecs['Hip'][0])]
                        t = thd
                    for elem in objects[part]:
                        t_mx = elem[1]  # candidate entry's matrix
                        matched = compare_matrix(f_mx, v, t_mx, t, debug)
                        if matched:
                            figs[i][part] = elem
                            # Debug
                            if debug:
                                print(elem)
                            # Hands share candidate pools, so a matched hand
                            # must be removed from both lists.
                            if part in ['Hand.L', 'Hand.R']:
                                for p in ['Hand.L', 'Hand.R']:
                                    objects[p].remove(elem)
                            else:
                                objects[part].remove(elem)
                            break
                    if matched:
                        break
    return figs
def add_mecafig_from_file(self, context, filepath, count):
    """Import MiniFig(s) from a ZIP archive and build a MecaFig for each.

    The archive is expected to contain either a Collada (.dae) or a
    Mecabricks (.zmbx/.mbx) file.

    Args:
        context: Blender context.
        filepath: path to the selected archive.
        count: index of this file in a multi-file import; offsets the
            placement along X so imported figures do not overlap.
    """
    scene = context.scene
    asf = scene.mecafig.shading.apply_settings_for
    # Get / Set 'Apply Settings For'
    # Force 'ACTIVE' while importing; the previous value is restored at the
    # end of this function.
    if not asf == 'ACTIVE':
        get_asf = asf
        scene.mecafig.shading.apply_settings_for = 'ACTIVE'
    else:
        get_asf = 'ACTIVE'
    # Extract file from ZIP
    file = extract_zipfile(filepath)
    file_name = os.path.basename(filepath).split('.')[0]
    figs = {}
    if file.endswith('.dae'):
        figs = extract_from_collada(self, context, filepath, file)
    elif file.endswith('.mbx'):
        # Matches both '.mbx' and '.zmbx' suffixes.
        figs = extract_from_zmbx(self, context, filepath, file)
    # Parse MiniFig(s)
    figs = parse_objects(figs)
    # For each MiniFig
    for i in figs:
        # Add MecaFig
        mecafig = add_mecafig(context)
        # Place figures on a grid: X per input file, Y per figure.
        mecafig.location[0] += count * 40
        mecafig.location[1] += i * 40
        for part in MECAFIG:
            for ob in mecafig.children:
                ob_data = ob.mecafig
                if ob_data.geometry.name == part:
                    # Set object as active
                    bpy.ops.object.select_all(action='DESELECT')
                    ob.select_set(True)
                    context.view_layer.objects.active = ob
                    if part in figs[i]:
                        # Set Mesh
                        if part in ['Leg.L', 'Leg.R', 'Body', 'Head']:
                            ob_mesh = figs[i][part][0]
                            for mesh in MECAFIG[part]['meshes']:
                                if mesh in ob_mesh:
                                    ob_data.geometry.mesh = mesh
                        # Set Material
                        ob_mat = ob.active_material
                        ob_fig = figs[i][part]
                        set_shading(ob_mat, ob_fig, filepath)
    # Rename New MecaFig
    context.scene.mecafig.name = file_name
    # Set 'Apply Setting For'
    scene.mecafig.shading.apply_settings_for = get_asf
    return {'FINISHED'}
| feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/mecafig/functions/mecafig.py | Python | gpl-3.0 | 25,383 |
# Package version as a (major, minor, patch) tuple.
VERSION = (0, 10, 0)
| xgfone/homepage | homepage/homepage/django_packages/taggit/__init__.py | Python | bsd-3-clause | 21 |
import re
from autotest.client.shared import error
from virttest import utils_misc, aexpect, storage, data_dir
def run_pci_hotplug(test, params, env):
    """
    Test hotplug of PCI devices.

    (Elements between [] are configurable test parameters)
    1) PCI add a device (NIC / block)
    2) Compare output of monitor command 'info pci'.
    3) Compare output of guest command [reference_cmd].
    4) Verify whether pci_model is shown in [pci_find_cmd].
    5) Check whether the newly added PCI device works fine.
    6) PCI delete the device, verify whether could remove the PCI device.

    @param test: QEMU test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)
    # Get output of command 'info pci' as reference
    info_pci_ref = vm.monitor.info("pci")
    # Get output of command as reference
    reference = session.cmd_output(params["reference_cmd"])
    tested_model = params["pci_model"]
    test_type = params["pci_type"]  # 'nic' or 'block'
    image_format = params.get("image_format_stg")
    # Probe qemu to verify what is the supported syntax for PCI hotplug:
    # 'device_add' on newer qemu, 'pci_add' on older versions.
    cmd_output = vm.monitor.cmd("?")
    if len(re.findall("\ndevice_add", cmd_output)) > 0:
        cmd_type = "device_add"
    elif len(re.findall("\npci_add", cmd_output)) > 0:
        cmd_type = "pci_add"
    else:
        raise error.TestError("Unknow version of qemu")
    # Determine syntax of drive hotplug
    # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6
    if len(re.findall("\n__com.redhat_drive_add", cmd_output)) > 0:
        drive_cmd_type = "__com.redhat_drive_add"
    # drive_add == qemu-kvm-0.13 onwards
    elif len(re.findall("\ndrive_add", cmd_output)) > 0:
        drive_cmd_type = "drive_add"
    else:
        raise error.TestError("Unknow version of qemu")
    # Probe qemu for a list of supported devices
    devices_support = vm.monitor.cmd("%s ?" % cmd_type)
    if cmd_type == "pci_add":
        if test_type == "nic":
            pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model
        elif test_type == "block":
            image_params = params.object_params("stg")
            image_filename = storage.get_image_filename(image_params,
                                                        data_dir.get_data_dir())
            pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
                           (image_filename, tested_model))
        # Execute pci_add (should be replaced by a proper monitor method call)
        add_output = vm.monitor.cmd(pci_add_cmd)
        if not "OK domain" in add_output:
            raise error.TestFail("Add PCI device failed. "
                                 "Monitor command is: %s, Output: %r" %
                                 (pci_add_cmd, add_output))
        after_add = vm.monitor.info("pci")
    elif cmd_type == "device_add":
        driver_id = test_type + "-" + utils_misc.generate_random_id()
        device_id = test_type + "-" + utils_misc.generate_random_id()
        if test_type == "nic":
            if tested_model == "virtio":
                tested_model = "virtio-net-pci"
            pci_add_cmd = "device_add id=%s,driver=%s" % (device_id,
                                                          tested_model)
        elif test_type == "block":
            image_params = params.object_params("stg")
            image_filename = storage.get_image_filename(image_params,
                                                        data_dir.get_data_dir())
            controller_model = None
            if tested_model == "virtio":
                tested_model = "virtio-blk-pci"
            if tested_model == "scsi":
                # scsi-disk needs an explicit SCSI controller on the bus.
                tested_model = "scsi-disk"
                controller_model = "lsi53c895a"
                if len(re.findall(controller_model, devices_support)) == 0:
                    raise error.TestError("scsi controller device (%s) not "
                                          "supported by qemu" %
                                          controller_model)
            if controller_model is not None:
                controller_id = "controller-" + device_id
                controller_add_cmd = ("device_add %s,id=%s" %
                                      (controller_model, controller_id))
                vm.monitor.cmd(controller_add_cmd)
            # Hotplug the backing drive first, then the device using it.
            if drive_cmd_type == "drive_add":
                driver_add_cmd = ("drive_add auto "
                                  "file=%s,if=none,id=%s,format=%s" %
                                  (image_filename, driver_id, image_format))
            elif drive_cmd_type == "__com.redhat_drive_add":
                driver_add_cmd = ("__com.redhat_drive_add "
                                  "file=%s,format=%s,id=%s" %
                                  (image_filename, image_format, driver_id))
            pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
                           (device_id, tested_model, driver_id))
            vm.monitor.cmd(driver_add_cmd)
        # Check if the device is support in qemu
        if len(re.findall(tested_model, devices_support)) > 0:
            add_output = vm.monitor.cmd(pci_add_cmd)
        else:
            raise error.TestError("%s doesn't support device: %s" %
                                  (cmd_type, tested_model))
        after_add = vm.monitor.info("pci")
        if not device_id in after_add:
            raise error.TestFail("Add device failed. Monitor command is: %s"
                                 ". Output: %r" % (pci_add_cmd, add_output))
    # Define a helper function to delete the device
    def pci_del(ignore_failure=False):
        if cmd_type == "pci_add":
            # Parse the "OK domain x, bus y, slot z" reply to build the addr.
            result_domain, bus, slot, _ = add_output.split(',')
            domain = int(result_domain.split()[2])
            bus = int(bus.split()[1])
            slot = int(slot.split()[1])
            pci_addr = "%x:%x:%x" % (domain, bus, slot)
            cmd = "pci_del pci_addr=%s" % pci_addr
        elif cmd_type == "device_add":
            cmd = "device_del %s" % device_id
        # This should be replaced by a proper monitor method call
        vm.monitor.cmd(cmd)
        def device_removed():
            after_del = vm.monitor.info("pci")
            return after_del != after_add
        if (not utils_misc.wait_for(device_removed, 10, 0, 1)
            and not ignore_failure):
            raise error.TestFail("Failed to hot remove PCI device: %s. "
                                 "Monitor command: %s" %
                                 (tested_model, cmd))
    try:
        # Compare the output of 'info pci'
        if after_add == info_pci_ref:
            raise error.TestFail("No new PCI device shown after executing "
                                 "monitor command: 'info pci'")
        # Define a helper function to compare the output
        def new_shown():
            o = session.cmd_output(params["reference_cmd"])
            return o != reference
        secs = int(params["wait_secs_for_hook_up"])
        if not utils_misc.wait_for(new_shown, 30, secs, 3):
            raise error.TestFail("No new device shown in output of command "
                                 "executed inside the guest: %s" %
                                 params.get("reference_cmd"))
        # Define a helper function to catch PCI device string
        def find_pci():
            o = session.cmd_output(params["find_pci_cmd"])
            return params["match_string"] in o
        if not utils_misc.wait_for(find_pci, 30, 3, 3):
            raise error.TestFail("PCI %s %s device not found in guest. "
                                 "Command was: %s" %
                                 (tested_model, test_type,
                                  params.get("find_pci_cmd")))
        # Test the newly added device
        try:
            session.cmd(params["pci_test_cmd"])
        except aexpect.ShellError, e:
            raise error.TestFail("Check for %s device failed after PCI "
                                 "hotplug. Output: %r" % (test_type, e.output))
        session.close()
    except Exception:
        # Best-effort cleanup before propagating the failure.
        pci_del(ignore_failure=True)
        raise
    else:
        pci_del()
| suqinhuang/virt-test | qemu/tests/pci_hotplug.py | Python | gpl-2.0 | 8,562 |
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Result Codes."""
import dns.exception
from ._compat import long
# Standard DNS rcode values (RFC 1035 and RFC 2136; BADVERS is the EDNS
# extended rcode from RFC 6891).
NOERROR = 0    # No error condition
FORMERR = 1    # Format error
SERVFAIL = 2   # Server failure
NXDOMAIN = 3   # Name error: the domain name does not exist
NOTIMP = 4     # Not implemented
REFUSED = 5    # Operation refused
YXDOMAIN = 6   # Name exists when it should not
YXRRSET = 7    # RRset exists when it should not
NXRRSET = 8    # RRset that should exist does not
NOTAUTH = 9    # Server not authoritative for the zone
NOTZONE = 10   # Name not contained in zone
BADVERS = 16   # Bad EDNS version

# Mapping from textual mnemonic to rcode value.
_by_text = {
    'NOERROR': NOERROR,
    'FORMERR': FORMERR,
    'SERVFAIL': SERVFAIL,
    'NXDOMAIN': NXDOMAIN,
    'NOTIMP': NOTIMP,
    'REFUSED': REFUSED,
    'YXDOMAIN': YXDOMAIN,
    'YXRRSET': YXRRSET,
    'NXRRSET': NXRRSET,
    'NOTAUTH': NOTAUTH,
    'NOTZONE': NOTZONE,
    'BADVERS': BADVERS
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict((y, x) for x, y in _by_text.items())
class UnknownRcode(dns.exception.DNSException):
    """Raised when a DNS rcode name or value is unknown."""
def from_text(text):
    """Convert text into an rcode.

    @param text: the textual rcode
    @type text: string
    @raises UnknownRcode: the rcode is unknown
    @rtype: int
    """
    # A decimal value in the valid range is accepted as-is.
    if text.isdigit():
        numeric = int(text)
        if 0 <= numeric <= 4095:
            return numeric
    # Otherwise fall back to the mnemonic table (case-insensitive).
    value = _by_text.get(text.upper())
    if value is None:
        raise UnknownRcode
    return value
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.

    @param flags: the DNS flags
    @type flags: int
    @param ednsflags: the EDNS flags
    @type ednsflags: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: int
    """
    # The low 4 bits live in the DNS header flags; the high bits are
    # carried in the EDNS flags word.
    rcode = ((ednsflags >> 20) & 0xff0) | (flags & 0x000f)
    if not 0 <= rcode <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return rcode
def to_flags(value):
    """Return a (flags, ednsflags) tuple which encodes the rcode.

    @param value: the rcode
    @type value: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: (int, int) tuple
    """
    if not 0 <= value <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    # Low 4 bits go into the DNS header flags; the remainder is shifted
    # into the EDNS flags word.
    header_bits = value & 0xf
    edns_bits = long(value & 0xff0) << 20
    return (header_bits, edns_bits)
def to_text(value):
    """Convert rcode into text.

    @param value: the rcode
    @type value: int
    @rtype: string
    """
    # Unknown rcodes are rendered as their decimal value.
    known = _by_value.get(value)
    return known if known is not None else str(value)
| burzillibus/RobHome | venv/lib/python2.7/site-packages/dns/rcode.py | Python | mit | 3,104 |
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-13 Jim Easterbrook jim@jim-easterbrook.me.uk
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Low level USB interface to weather station, using PyUSB.
Introduction
============
This module handles low level communication with the weather station
via the `PyUSB <http://sourceforge.net/apps/trac/pyusb/>`_ library
(version 1.0). Alternative modules, :doc:`pywws.device_pyusb`,
:doc:`pywws.device_ctypes_hidapi`, and
:doc:`pywws.device_cython_hidapi`, use different libraries. The choice
of which module to use depends on which libraries are available for
your computer.
Users of recent versions of Mac OS have less choice. The operating
system makes it very difficult to access HID devices (such as the
weather station) directly, so the ``hidapi`` library has to be used.
Installation
============
Some of this software may already be installed on your machine, so do
check before downloading sources and compiling them yourself.
#. Install libusb and PyUSB.
These should be available as packages for your operating system,
but their names may vary. For example, on Ubuntu Linux::
sudo apt-get install python-usb
On some embedded linux systems::
ipkg install libusb py25-usb
Testing
=======
Run ``TestWeatherStation.py`` with increased verbosity so it reports
which USB device access module is being used::
python TestWeatherStation.py -vv
18:28:09:pywws.WeatherStation.CUSBDrive:using pywws.device_pyusb1
0000 55 aa ff ff ff ff ff ff ff ff ff ff ff ff ff ff 05 20 01 41 11 00 00 00 81 00 00 0f 05 00 e0 51
0020 03 27 ce 27 00 00 00 00 00 00 00 12 02 14 18 27 41 23 c8 00 00 00 46 2d 2c 01 64 80 c8 00 00 00
0040 64 00 64 80 a0 28 80 25 a0 28 80 25 03 36 00 05 6b 00 00 0a 00 f4 01 12 00 00 00 00 00 00 00 00
0060 00 00 49 0a 63 12 05 01 7f 00 36 01 60 80 36 01 60 80 bc 00 7b 80 95 28 12 26 6c 28 25 26 c8 01
0080 1d 02 d8 00 de 00 ff 00 ff 00 ff 00 00 11 10 06 01 29 12 02 01 19 32 11 09 09 05 18 12 01 22 13
00a0 14 11 11 04 15 04 11 12 17 05 12 11 09 02 15 26 12 02 11 07 05 11 09 02 15 26 12 02 11 07 05 11
00c0 09 10 09 12 12 02 02 12 38 12 02 07 19 00 11 12 16 03 27 12 02 03 11 00 11 12 16 03 27 11 12 26
00e0 21 32 11 12 26 21 32 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57
API
===
"""
__docformat__ = "restructuredtext en"
import sys
import usb.core
import usb.util
class USBDevice(object):
    """Low level USB device access via the PyUSB 1.0 library."""

    def __init__(self, idVendor, idProduct):
        """Locate and configure the weather station's USB device.

        :param idVendor: the USB "vendor ID" number, for example 0x1941.
        :type idVendor: int
        :param idProduct: the USB "product ID" number, for example 0x8021.
        :type idProduct: int
        """
        self.dev = usb.core.find(idVendor=idVendor, idProduct=idProduct)
        if not self.dev:
            raise IOError("Weather station device not found")
        if sys.platform.startswith('linux'):
            # A kernel HID driver may own interface 0; detach it first so
            # we can claim the device.
            try:
                detach = self.dev.is_kernel_driver_active(0)
            except NotImplementedError:
                detach = True
            if detach:
                try:
                    self.dev.detach_kernel_driver(0)
                except usb.core.USBError:
                    pass
        self.dev.set_configuration()
        self.dev.reset()

    def read_data(self, size):
        """Receive data from the device.

        Raises :obj:`IOError` if fewer than ``size`` bytes arrive.

        :param size: the number of bytes to read.
        :type size: int
        :return: the data received.
        :rtype: list(int)
        """
        received = self.dev.read(0x81, size, timeout=1200)
        if not received or len(received) < size:
            raise IOError('pywws.device_pyusb1.USBDevice.read_data failed')
        return list(received)

    def write_data(self, buf):
        """Send data to the device.

        Raises :obj:`IOError` if the transfer is incomplete.

        :param buf: the data to send.
        :type buf: list(int)
        :return: success status.
        :rtype: bool
        """
        # Class-specific OUT control transfer to the interface.
        request_type = usb.util.build_request_type(
            usb.util.ENDPOINT_OUT,
            usb.util.CTRL_TYPE_CLASS,
            usb.util.CTRL_RECIPIENT_INTERFACE
        )
        sent = self.dev.ctrl_transfer(
            bmRequestType=request_type,
            bRequest=usb.REQ_SET_CONFIGURATION,
            data_or_wLength=buf,
            wValue=0x200,
            timeout=50)
        if sent != len(buf):
            raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
        return True
| kenmcc/mypywws | src/pywws/device_pyusb1.py | Python | gpl-2.0 | 5,457 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An apiproxy stub that calls a remote handler via HTTP.
This allows easy remote access to the App Engine datastore, and potentially any
of the other App Engine APIs, using the same interface you use when accessing
the service locally.
An example Python script:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from myapp import models
import getpass
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteDatastore('my-app', '/remote_api', auth_func)
# Now you can access the remote datastore just as if your code was running on
# App Engine!
houses = models.House.all().fetch(100)
for a_house in q:
a_house.doors += 1
db.put(houses)
---
A few caveats:
- Where possible, avoid iterating over queries. Fetching as many results as you
will need is faster and more efficient. If you don't know how many results
you need, or you need 'all of them', iterating is fine.
- Likewise, it's a good idea to put entities in batches. Instead of calling put
for each individual entity, accumulate them and put them in batches using
db.put(), if you can.
- Requests and responses are still limited to 1MB each, so if you have large
entities or try and fetch or put many of them at once, your requests may fail.
"""
import google
import os
import pickle
import random
import sha
import sys
import thread
import threading
import yaml
from google.appengine.api import datastore
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import appengine_rpc
class Error(Exception):
  """Base class for all exceptions raised by this module."""
class ConfigurationError(Error):
  """Raised when the remote API stub is configured incorrectly."""
class UnknownJavaServerError(Error):
  """Raised for unidentified errors returned by a Java remote_api handler."""
def GetUserAgent():
  """Determines the value of the 'User-agent' header to use for HTTP requests.

  Returns:
    String containing the 'user-agent' header value, which includes the SDK
    version, the platform information, and the version of Python;
    e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
  """
  python_version = ".".join(str(part) for part in sys.version_info)
  tokens = [
      "Google-remote_api/1.0",
      appengine_rpc.GetPlatformToken(),
      "Python/%s" % python_version,
  ]
  return " ".join(tokens)
def GetSourceName():
  """Returns the client source identifier string."""
  return "Google-remote_api-1.0"
class TransactionData(object):
  """Encapsulates data about an individual transaction."""

  def __init__(self, thread_id):
    # The thread that owns this transaction; transactions are
    # single-threaded.
    self.thread_id = thread_id
    # Encoded key -> (key, entity hash or None) for entities read within
    # the transaction.
    self.preconditions = {}
    # Encoded key -> (key, entity or None) for entities written or deleted
    # within the transaction.
    self.entities = {}
class RemoteStub(object):
  """A stub for calling services on a remote server over HTTP.

  You can use this to stub out any service that the remote server supports.
  """

  def __init__(self, server, path, _test_stub_map=None):
    """Constructs a new RemoteStub that communicates with the specified server.

    Args:
      server: An instance of a subclass of
        google.appengine.tools.appengine_rpc.AbstractRpcServer.
      path: The path to the handler this stub should send requests to.
      _test_stub_map: If supplied, calls for services registered in it are
        routed to the local test stub instead of the remote server.
    """
    self._server = server
    self._path = path
    self._test_stub_map = _test_stub_map

  def _PreHookHandler(self, service, call, request, response):
    # Subclass hook invoked before every MakeSyncCall.
    pass

  def _PostHookHandler(self, service, call, request, response):
    # Subclass hook invoked after every MakeSyncCall (even on error).
    pass

  def MakeSyncCall(self, service, call, request, response):
    self._PreHookHandler(service, call, request, response)
    try:
      # Prefer a local test stub for this service when one is registered.
      test_stub = self._test_stub_map and self._test_stub_map.GetStub(service)
      if test_stub:
        test_stub.MakeSyncCall(service, call, request, response)
      else:
        self._MakeRealSyncCall(service, call, request, response)
    finally:
      self._PostHookHandler(service, call, request, response)

  def _MakeRealSyncCall(self, service, call, request, response):
    # Wrap the API call in a remote_api Request proto and POST it.
    request_pb = remote_api_pb.Request()
    request_pb.set_service_name(service)
    request_pb.set_method(call)
    request_pb.mutable_request().set_contents(request.Encode())

    response_pb = remote_api_pb.Response()
    encoded_request = request_pb.Encode()
    encoded_response = self._server.Send(self._path, encoded_request)
    response_pb.ParseFromString(encoded_response)

    # Re-raise any error reported by the remote handler.
    if response_pb.has_application_error():
      error_pb = response_pb.application_error()
      raise apiproxy_errors.ApplicationError(error_pb.code(),
                                             error_pb.detail())
    elif response_pb.has_exception():
      # NOTE(review): unpickling data from the server is unsafe if the
      # server is not fully trusted.
      raise pickle.loads(response_pb.exception().contents())
    elif response_pb.has_java_exception():
      raise UnknownJavaServerError("An unknown error has occured in the "
                                   "Java remote_api handler for this call.")
    else:
      response.ParseFromString(response_pb.response().contents())

  def CreateRPC(self):
    # RPCs issued through this stub execute synchronously via MakeSyncCall.
    return apiproxy_rpc.RPC(stub=self)
class RemoteDatastoreStub(RemoteStub):
"""A specialised stub for accessing the App Engine datastore remotely.
A specialised stub is required because there are some datastore operations
that preserve state between calls. This stub makes queries possible.
Transactions on the remote datastore are unfortunately still impossible.
"""
  def __init__(self, server, path, default_result_count=20,
               _test_stub_map=None):
    """Constructor.

    Args:
      server: The server name to connect to.
      path: The URI path on the server.
      default_result_count: The number of items to fetch, by default, in a
        datastore Query or Next operation. This affects the batch size of
        query iterators.
      _test_stub_map: If supplied, routes some calls to local test stubs
        (see RemoteStub).
    """
    super(RemoteDatastoreStub, self).__init__(server, path, _test_stub_map)
    self.default_result_count = default_result_count
    # Maps local cursor ids to outstanding queries (None once exhausted).
    self.__queries = {}
    # Maps local transaction handles to TransactionData instances.
    self.__transactions = {}

    # Next locally-issued cursor id; guarded by __local_cursor_lock.
    self.__next_local_cursor = 1
    self.__local_cursor_lock = threading.Lock()
    # Next locally-issued transaction handle; guarded by __local_tx_lock.
    self.__next_local_tx = 1
    self.__local_tx_lock = threading.Lock()
  def MakeSyncCall(self, service, call, request, response):
    """Dispatches datastore_v3 calls, handling some locally when overridden."""
    assert service == 'datastore_v3'

    explanation = []
    assert request.IsInitialized(explanation), explanation

    # Route to a local _Dynamic_<call> handler when one exists; otherwise
    # forward the call unchanged to the remote server.
    handler = getattr(self, '_Dynamic_' + call, None)
    if handler:
      handler(request, response)
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
                                                    response)

    assert response.IsInitialized(explanation), explanation
  def _Dynamic_RunQuery(self, query, query_result, cursor_id = None):
    """Runs the query remotely and records it so Next can resume it."""
    super(RemoteDatastoreStub, self).MakeSyncCall(
        'datastore_v3', 'RunQuery', query, query_result)

    if cursor_id is None:
      # Allocate a fresh local cursor id under the lock.
      self.__local_cursor_lock.acquire()
      try:
        cursor_id = self.__next_local_cursor
        self.__next_local_cursor += 1
      finally:
        self.__local_cursor_lock.release()

    if query_result.more_results():
      # Advance offset/limit so a later Next picks up where we left off.
      query.set_offset(query.offset() + query_result.result_size())
      if query.has_limit():
        query.set_limit(query.limit() - query_result.result_size())
      self.__queries[cursor_id] = query
    else:
      # Query exhausted: remember that so Next reports no more results.
      self.__queries[cursor_id] = None

    query_result.mutable_cursor().set_cursor(cursor_id)
  def _Dynamic_Next(self, next_request, query_result):
    """Fetches the next batch for a cursor issued by _Dynamic_RunQuery."""
    cursor_id = next_request.cursor().cursor()
    if cursor_id not in self.__queries:
      raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
                                             'Cursor %d not found' % cursor_id)
    query = self.__queries[cursor_id]

    if query is None:
      # The query was already exhausted by a previous call.
      query_result.set_more_results(False)
      return
    else:
      if next_request.has_count():
        query.set_count(next_request.count())
      else:
        query.clear_count()

    # Re-run the stored query (with updated offset/limit) under the same
    # cursor id.
    self._Dynamic_RunQuery(query, query_result, cursor_id)
  def _Dynamic_Get(self, get_request, get_response):
    """Get with read-your-writes and precondition tracking in transactions."""
    txid = None
    if get_request.has_transaction():
      # Grab our transaction-local state.
      txid = get_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."

      # Only fetch keys not already written to in this transaction.
      keys = [(k, k.Encode()) for k in get_request.key_list()]
      new_request = datastore_pb.GetRequest()
      for key, enckey in keys:
        if enckey not in txdata.entities:
          new_request.add_key().CopyFrom(key)
    else:
      new_request = get_request

    if new_request.key_size() > 0:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Get', new_request, get_response)

    if txid is not None:
      # Record a precondition (entity hash, or None if absent) for every
      # key read, so Commit can detect concurrent modification.
      newkeys = new_request.key_list()
      entities = get_response.entity_list()
      for key, entity in zip(newkeys, entities):
        entity_hash = None
        if entity.has_entity():
          entity_hash = sha.new(entity.entity().Encode()).digest()
        txdata.preconditions[key.Encode()] = (key, entity_hash)

      # Merge locally buffered writes with the fetched entities, keeping
      # the order of the original request.
      new_response = datastore_pb.GetResponse()
      it = iter(get_response.entity_list())
      for key, enckey in keys:
        if enckey in txdata.entities:
          cached_entity = txdata.entities[enckey][1]
          if cached_entity:
            new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
          else:
            # Entity was deleted within this transaction.
            new_response.add_entity()
        else:
          new_entity = it.next()
          if new_entity.has_entity():
            assert new_entity.entity().key() == key
            new_response.add_entity().CopyFrom(new_entity)
          else:
            new_response.add_entity()
      get_response.CopyFrom(new_response)
  def _Dynamic_Put(self, put_request, put_response):
    """Put; inside a transaction, buffers writes after pre-allocating ids."""
    if put_request.has_transaction():
      entities = put_request.entity_list()

      # Pre-allocate ids for entities whose final key path element has
      # neither a numeric id nor a name.
      requires_id = lambda x: x.id() == 0 and not x.has_name()
      new_ents = [e for e in entities
                  if requires_id(e.key().path().element_list()[-1])]
      id_request = remote_api_pb.PutRequest()
      if new_ents:
        for ent in new_ents:
          e = id_request.add_entity()
          e.mutable_key().CopyFrom(ent.key())
          e.mutable_entity_group()
        id_response = datastore_pb.PutResponse()
        super(RemoteDatastoreStub, self).MakeSyncCall(
            'remote_datastore', 'GetIDs', id_request, id_response)
        assert id_request.entity_size() == id_response.key_size()
        # Copy the allocated keys back onto the entities.
        for key, ent in zip(id_response.key_list(), new_ents):
          ent.mutable_key().CopyFrom(key)
          ent.mutable_entity_group().add_element().CopyFrom(
              key.path().element(0))

      # Buffer the writes locally; they are sent to the server on Commit.
      txid = put_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      for entity in entities:
        txdata.entities[entity.key().Encode()] = (entity.key(), entity)
        put_response.add_key().CopyFrom(entity.key())
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Put', put_request, put_response)
  def _Dynamic_Delete(self, delete_request, response):
    """Handles datastore Delete RPCs.

    Inside a transaction deletes are buffered locally as (key, None) entries
    in txdata.entities and only applied at commit; otherwise the request is
    forwarded to the remote server unchanged.
    """
    if delete_request.has_transaction():
      txid = delete_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      # A None entity marks the key for deletion at commit time.
      for key in delete_request.key_list():
        txdata.entities[key.Encode()] = (key, None)
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Delete', delete_request, response)
def _Dynamic_BeginTransaction(self, request, transaction):
self.__local_tx_lock.acquire()
try:
txid = self.__next_local_tx
self.__transactions[txid] = TransactionData(thread.get_ident())
self.__next_local_tx += 1
finally:
self.__local_tx_lock.release()
transaction.set_handle(txid)
def _Dynamic_Commit(self, transaction, transaction_response):
txid = transaction.handle()
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
tx = remote_api_pb.TransactionRequest()
for key, hash in txdata.preconditions.values():
precond = tx.add_precondition()
precond.mutable_key().CopyFrom(key)
if hash:
precond.set_hash(hash)
puts = tx.mutable_puts()
deletes = tx.mutable_deletes()
for key, entity in txdata.entities.values():
if entity:
puts.add_entity().CopyFrom(entity)
else:
deletes.add_key().CopyFrom(key)
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', 'Transaction',
tx, datastore_pb.PutResponse())
def _Dynamic_Rollback(self, transaction, transaction_response):
txid = transaction.handle()
self.__local_tx_lock.acquire()
try:
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata[txid].thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
finally:
self.__local_tx_lock.release()
  def _Dynamic_CreateIndex(self, index, id_response):
    """Index creation is not supported over remote_api."""
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
  def _Dynamic_UpdateIndex(self, index, void):
    """Index updates are not supported over remote_api."""
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
  def _Dynamic_DeleteIndex(self, index, void):
    """Index deletion is not supported over remote_api."""
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
# Every service name that ConfigureRemoteApi() knows how to proxy to the
# remote server; requests for anything else raise ConfigurationError.
ALL_SERVICES = set([
    'capability_service',
    'datastore_v3',
    'images',
    'mail',
    'memcache',
    'taskqueue',
    'urlfetch',
    'xmpp',
    ])
def ConfigureRemoteApi(app_id,
                       path,
                       auth_func,
                       servername=None,
                       rpc_server_factory=appengine_rpc.HttpRpcServer,
                       rtok=None,
                       secure=False,
                       services=None,
                       default_auth_domain=None,
                       save_cookies=False):
  """Does necessary setup to allow easy remote access to App Engine APIs.

  Either servername must be provided or app_id must not be None.  If app_id
  is None and a servername is provided, this function will send a request
  to the server to retrieve the app_id.

  Args:
    app_id: The app_id of your app, as declared in app.yaml.
    path: The path to the remote_api handler for your app
      (for example, '/remote_api').
    auth_func: A function that takes no arguments and returns a
      (username, password) tuple. This will be called if your application
      requires authentication to access the remote_api handler (it should!)
      and you do not already have a valid auth cookie.
    servername: The hostname your app is deployed on. Defaults to
      <app_id>.appspot.com.
    rpc_server_factory: A factory to construct the rpc server for the
      datastore.
    rtok: The validation token to send with app_id lookups. If None, a random
      token is used.
    secure: Use SSL when communicating with the server.
    services: A list of services to set up stubs for. If specified, only those
      services are configured; by default all supported services are
      configured.
    default_auth_domain: The authentication domain to use by default.
    save_cookies: Forwarded to rpc_server_factory function.

  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is an error configuring the DatastoreFileStub.
  """
  if not servername and not app_id:
    raise ConfigurationError('app_id or servername required')
  if not servername:
    servername = '%s.appspot.com' % (app_id,)
  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
                              GetSourceName(), save_cookies=save_cookies,
                              debug_data=False, secure=secure)
  if not app_id:
    # No app_id supplied: ask the remote_api handler, validating the reply
    # with a (possibly random) round-trip token.
    if not rtok:
      random.seed()
      rtok = str(random.random())[2:]
    urlargs = {'rtok': rtok}
    response = server.Send(path, payload=None, **urlargs)
    if not response.startswith('{'):
      # Fixed typo in the error message: "recieved" -> "received".
      raise ConfigurationError(
          'Invalid response received from server: %s' % response)
    # NOTE(review): yaml.load() on data from the network can construct
    # arbitrary Python objects with older PyYAML defaults; consider
    # yaml.safe_load if the bundled PyYAML supports it.
    app_info = yaml.load(response)
    if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
      raise ConfigurationError('Error parsing app_id lookup response')
    if app_info['rtok'] != rtok:
      raise ConfigurationError('Token validation failed during app_id lookup. '
                               '(sent %s, got %s)' % (repr(rtok),
                                                      repr(app_info['rtok'])))
    app_id = app_info['app_id']
  if services is not None:
    services = set(services)
    unsupported = services.difference(ALL_SERVICES)
    if unsupported:
      raise ConfigurationError('Unsupported service(s): %s'
                               % (', '.join(unsupported),))
  else:
    services = set(ALL_SERVICES)
  os.environ['APPLICATION_ID'] = app_id
  if default_auth_domain:
    os.environ['AUTH_DOMAIN'] = default_auth_domain
  elif 'AUTH_DOMAIN' not in os.environ:
    os.environ['AUTH_DOMAIN'] = 'gmail.com'
  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
  if 'datastore_v3' in services:
    # The datastore gets a dedicated stub that understands transactions.
    services.remove('datastore_v3')
    datastore_stub = RemoteDatastoreStub(server, path)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
  # All remaining services share one generic pass-through stub.
  stub = RemoteStub(server, path)
  for service in services:
    apiproxy_stub_map.apiproxy.RegisterStub(service, stub)
def MaybeInvokeAuthentication():
  """Issue an empty request to the configured remote_api end-point.

  If credentials are required, sending the request makes the rpc server run
  its interactive authentication flow up front.

  Raises:
    ConfigurationError: when remote_api has not been configured yet.
  """
  stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
  if not isinstance(stub, RemoteStub):
    raise ConfigurationError('remote_api is not configured.')
  stub._server.Send(stub._path, payload=None)
ConfigureRemoteDatastore = ConfigureRemoteApi
| rev2004/android2cloud.app-engine | google_appengine/google/appengine/ext/remote_api/remote_api_stub.py | Python | mit | 19,484 |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import CameraInfo
def callback(data):
    """Rescale the Tx term of the incoming CameraInfo's projection matrix.

    P is stored as a flat 3x4 row-major tuple, so index 3 is the Tx entry;
    it is multiplied by the module-level `scale` parameter before the
    message is republished on `pub`.
    """
    proj = list(data.P)
    proj[3] = proj[3] * scale
    data.P = tuple(proj)
    pub.publish(data)
if __name__ == '__main__':
    # Republisher node: reads CameraInfo from camera_info_in, scales the
    # projection matrix Tx term, and publishes on camera_info_out.
    rospy.init_node('republish_camera_info', anonymous=True)
    pub = rospy.Publisher('camera_info_out', CameraInfo, queue_size=1)
    # Default scale tuned for a specific rig -- override via ~scale param.
    scale = rospy.get_param('~scale', 1.091664)
    rospy.Subscriber("camera_info_in", CameraInfo, callback)
    rospy.spin()
| introlab/rtabmap_ros | launch/jfr2018/republish_camera_info.py | Python | bsd-3-clause | 472 |
from collections import OrderedDict
import cms
from .subcommands.base import SubcommandsCommand
from .subcommands.check import CheckInstallation
from .subcommands.copy import CopyCommand
from .subcommands.delete_orphaned_plugins import DeleteOrphanedPluginsCommand
from .subcommands.list import ListCommand
from .subcommands.publisher_publish import PublishCommand
from .subcommands.tree import FixTreeCommand
from .subcommands.uninstall import UninstallCommand
class Command(SubcommandsCommand):
    """Top-level ``manage.py cms`` command that dispatches to sub-commands."""
    command_name = 'cms'
    # Registry of sub-commands; OrderedDict keeps --help output in this order.
    subcommands = OrderedDict((
        ('check', CheckInstallation),
        ('copy', CopyCommand),
        ('delete-orphaned-plugins', DeleteOrphanedPluginsCommand),
        ('fix-tree', FixTreeCommand),
        ('list', ListCommand),
        ('publisher-publish', PublishCommand),
        ('uninstall', UninstallCommand),
    ))
    missing_args_message = 'one of the available sub commands must be provided'
    # Parser destination that records which sub-command was chosen.
    subcommand_dest = 'cmd'
    def get_version(self):
        """Return the installed django CMS version (used by --version)."""
        return cms.__version__
    def add_arguments(self, parser):
        """Add a --version flag on top of the inherited sub-command args."""
        parser.add_argument('--version', action='version', version=self.get_version())
        super().add_arguments(parser)
| rsalmaso/django-cms | cms/management/commands/cms.py | Python | bsd-3-clause | 1,189 |
'''
title = "For loop simulated with a while loop"
level = "intro"
dificulty = "easy"
tags = ["iterators", "while-loop", "loops", "for-loop"]
description = """
Write a function that receives a list and prints every element in the list
using a while loop.
Example:
for_loop_simulated_with_while([3, 1, 2, 5])
> 3
> 1
> 2
> 5
"""
'''
def for_loop_simulated_with_while(a_list):
    """Print every element of *a_list* on its own line using a while loop.

    Implements the exercise described in the module header: simulate
    ``for x in a_list: print(x)`` with an explicit index and a while loop.

    Args:
        a_list: any sequence; an empty sequence prints nothing.
    """
    index = 0
    while index < len(a_list):
        print(a_list[index])
        index += 1
| rmotr/whooshercises | assignments/for_simulated_with_while.py | Python | mit | 407 |
from odoo import fields, models, api
class ResCompany(models.Model):
    """Extends res.company with per-company Visiotech web URLs."""
    _inherit = 'res.company'
    # Link to the privacy policy page shown in outgoing emails.
    url_policy = fields.Text(string="web privacy_policy", default='https://www.visiotechsecurity.com/en/legal-notice-and-the-privacy-policy',
                             help="link to the privacy policy", translate=True)
    # Link to the customer's profile page on the website.
    url_profile = fields.Text(string="web profile", default='https://www.visiotechsecurity.com/en/login/profile',
                              help="link to the profile in the web", translate=True)
    # Link to the products section of the website.
    url_products = fields.Text(string="web products", default='https://www.visiotechsecurity.com/en/',
                               help="link to section products", translate=True)
    # Free-form contact number; no default, varies per country.
    url_contact = fields.Text(string="contact number", help="Number contact depend on the country", translate=True)
| Comunitea/CMNT_004_15 | project-addons/custom_emails_company/models/company.py | Python | agpl-3.0 | 821 |
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
def string_add(v):
    """Return *v* with the marker suffix '_Python234' appended."""
    suffix = "_Python234"
    return v + suffix
def string_filter(v):
    """Keep only values whose length is at most three."""
    max_len = 3
    return len(v) <= max_len
def string_fm(v):
    """Flat-map helper: split *v* into whitespace-separated tokens."""
    tokens = v.split()
    return tokens
def json_add(v):
    """Store v['a'] + 235 under key 'c', mutating *v* in place, and return it."""
    total = v["a"] + 235
    v["c"] = total
    return v
def json_filter(v):
    """Pass tuples whose 'a' value does not exceed 100."""
    threshold = 100
    return v["a"] <= threshold
def json_fm(v):
    """Flat-map helper: emit the 'a' and 'b' values of *v* as a list."""
    return [v[key] for key in ("a", "b")]
# JSON serialization doesn't handle complex numbers, Timestamp
# IBM Java JSON deserialization can't handle uint64 bigger than Long.MAX_VALUE
def remove_complex(v):
r = dict(v)
del r['c32']
del r['c64']
del r['u64']
del r['lui64']
del r['ts']
return r
# JSON serialization doesn't handle sets, change them to a list
def change_set_to_list(v):
r = dict(v)
s = r['si32']
if isinstance(s, set):
del r['si32']
l = list(s)
r['si32'] = l
return r
| ibmkendrick/streamsx.topology | test/python/pubsub/pytest_funcs.py | Python | apache-2.0 | 832 |
"""
Counting Characters
"""
def main():
    """Count alphabetic characters on stdin case-insensitively (Python 2).

    Reads lines until EOF, tallies each letter a-z (upper case folded into
    lower), then prints one "letter : count" line per letter.
    """
    lower = "abcdefghijklmnopqrstuvwxyz"
    upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # One counter per letter of the alphabet, indexed by position in `lower`.
    count = [0 for i in xrange(26)]
    while True:
        try:
            s = raw_input().strip()
            for x in s:
                if x.isalpha():
                    if x in lower: count[lower.index(x)] += 1
                    elif x in upper: count[upper.index(x)] += 1
        except EOFError:
            # End of input reached: stop reading lines.
            break
    for i, x in enumerate(lower):
        print "%s : %d" % (x, count[i])
if __name__ == '__main__':
    main()
| curryudon08/aoj | Lesson/ITP1/ITP1_8_C.py | Python | unlicense | 565 |
"""Takes care of fetching data via HTTP."""
import urllib2
import ssl
from socket import error as SocketError
import time
import re
from tempfile import mkstemp
import os
ENABLE_DEBUG_PRINT = False  # Toggle dprint() diagnostic output.
MAX_RETRY_TIME_IN_SEC = 5  # Upper bound of the linear retry back-off.
NUM_SEC_TIMEOUT = 30  # Socket timeout passed to urllib2.urlopen.
NUM_SEC_SLEEP = 0  # Optional fixed delay before every fetch (rate limiting).
def looks_like_version(version):
    """Return whether *version* looks like a legitimate version number.

    Accepts at most three dot-separated groups of word characters and
    hyphens, e.g. '1.0.0' or '1.0.0-rc4'.
    """
    version_re = re.compile(r'^([\w\-]+\.)?([\w\-]+\.)?([\w\-]+)$')
    return version_re.match(version) is not None
def get_possible_zip_urls(github_project_url, version):
    """Build the candidate .zip download links for *version* of a project.

    GitHub serves release archives both with and without a leading 'v' in
    the tag, e.g.:
        https://github.com/blockchain/My-Wallet-V3/archive/v3.13.0.zip
        https://github.com/cryptocoinjs/bigi/archive/1.4.1.zip

    Args:
        github_project_url (str): URL of the project on GitHub.com.
        version (str): version string such as '1.0.0' or '1.0.0-rc4'.

    Returns:
        List[str]: candidate archive URLs, the 'v'-prefixed variant first.
    """
    assert isinstance(github_project_url, str)
    assert looks_like_version(version)
    assert github_project_url.startswith('https://github.com/')
    base_url = github_project_url.rstrip('/')
    return [
        "%s/archive/v%s.zip" % (base_url, version),
        "%s/archive/%s.zip" % (base_url, version),
    ]
def fetch_url(url, fetch_tmp_file=False):
    """Fetch contents of remote page as string for specified url.

    Handles a variety of errors and retries with linearly increasing backoff.

    Args:
        url (str): The URL of the HTTP resource to be fetched.
        fetch_tmp_file (bool): Disabled by default. If set to true, instead
            of returning the contents of the HTTP response body as a string,
            a file handle to a temporary file where the contents have been
            output will be returned.

    Returns:
        str or filename, depending on setting of `fetch_tmp_file` arg.
    """
    if NUM_SEC_SLEEP > 0:
        time.sleep(NUM_SEC_SLEEP)
    current_retry_time_in_sec = 0
    dprint("Fetching url: %s" % url)
    response = ''
    # Linear back-off: sleeps 0s, 1s, 2s, ... before successive attempts.
    while current_retry_time_in_sec <= MAX_RETRY_TIME_IN_SEC:
        if current_retry_time_in_sec:
            time.sleep(current_retry_time_in_sec)
        try:
            req = urllib2.urlopen(url=url, timeout=NUM_SEC_TIMEOUT)
            if fetch_tmp_file:
                return download_to_tmp(url, req)
            else:
                response = req.read()
                if response is None:
                    raise Exception
                return response
        except (urllib2.HTTPError, ssl.SSLError, urllib2.URLError,
                SocketError) as err:
            dprint(str(err))
            if hasattr(err, 'code') and err.code == 404:
                raise #don't retry for HTTP 404
            elif current_retry_time_in_sec == MAX_RETRY_TIME_IN_SEC:
                # Retries exhausted: surface the last error to the caller.
                print str(err)
                raise
            else:
                current_retry_time_in_sec += 1
def download_to_tmp(url, req):
    """Download the data indicated in the request to a temp file.

    This does not currently use a back-off in the event of an error, as I'm too
    lazy to deal with the edge cases currently.

    Args:
        url (str): The URL that data is being fetched from.
        req (`urllib.addinfourl`): The object returned from `urllib2.urlopen`.

    Returns:
        str: The filename of the temporary file written to.

    Raises:
        ValueError: Fails if the downloaded file appears incomplete.
    """
    tmp_fd, tmp_filename = mkstemp()
    downloaded = 0
    content_len_str = req.info().getheader('Content-Length')
    total_size = None
    if content_len_str is not None:
        total_size = int(content_len_str.strip())
        if total_size == 0:
            raise ValueError(("Size of file returned by request to '%s' is 0 bytes "
                              "according to HTTP response header.") % url)
    # Stream the body in large chunks rather than reading it all at once.
    CHUNK = 256 * 10240
    with open(tmp_filename, 'w') as tmp_file:
        while True:
            data_chunk = req.read(CHUNK)
            downloaded += len(data_chunk)
            if not data_chunk:
                break
            tmp_file.write(data_chunk)
    #https://www.logilab.org/blogentry/17873
    os.close(tmp_fd)
    # Only verify completeness when the server declared a Content-Length.
    if total_size is not None and downloaded != total_size:
        os.remove(tmp_filename)
        raise ValueError(('Incomplete download for %s. Expected %d bytes, '
                          'received %d.') % (url, total_size, downloaded))
    return tmp_filename
def dprint(msg):
    """Debug print statements; no-op unless ENABLE_DEBUG_PRINT is set."""
    if ENABLE_DEBUG_PRINT:
        print "DEBUG: %s" % msg
| kristovatlas/npm-dependency-check | http.py | Python | unlicense | 4,694 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Build the JSON-RPC proxy; credentials are embedded in the URL when set.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:39876")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:39876")
# First CLI argument selects which RPC method to invoke (case-insensitive).
cmd = sys.argv[1].lower()
# Interactive dispatch (Python 2 script): each branch prompts for the call's
# arguments with raw_input and prints the JSON-RPC result. Optional arguments
# are handled by first trying the call with them and falling back to the
# no-argument form when the server rejects it.
if cmd == "backupwallet":
	try:
		path = raw_input("Enter destination path/filename: ")
		print access.backupwallet(path)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccount":
	try:
		addr = raw_input("Enter a Bitcoin address: ")
		print access.getaccount(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaccountaddress(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaddressesbyaccount(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getbalance":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getbalance(acct, mc)
		except:
			print access.getbalance()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
	try:
		height = raw_input("Height: ")
		print access.getblockbycount(height)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockcount":
	try:
		print access.getblockcount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
	try:
		print access.getblocknumber()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
	try:
		print access.getconnectioncount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
	try:
		print access.getdifficulty()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getgenerate":
	try:
		print access.getgenerate()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
	try:
		print access.gethashespersec()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getinfo":
	try:
		print access.getinfo()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
	try:
		acct = raw_input("Enter an account name: ")
		try:
			print access.getnewaddress(acct)
		except:
			print access.getnewaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaccount(acct, mc)
		except:
			print access.getreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
	try:
		addr = raw_input("Enter a Bitcoin address (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaddress(addr, mc)
		except:
			print access.getreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gettransaction":
	try:
		txid = raw_input("Enter a transaction ID: ")
		print access.gettransaction(txid)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Remaining commands: informational calls, sends/moves, wallet settings and
# passphrase management. Same pattern as above: prompt, call, print; optional
# parameters fall back to the shorter call form on error.
elif cmd == "help":
	try:
		cmd = raw_input("Command (optional): ")
		try:
			print access.help(cmd)
		except:
			print access.help()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listaccounts":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.listaccounts(mc)
		except:
			print access.listaccounts()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaccount(mc, incemp)
		except:
			print access.listreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaddress(mc, incemp)
		except:
			print access.listreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listtransactions":
	try:
		acct = raw_input("Account (optional): ")
		count = raw_input("Number of transactions (optional): ")
		frm = raw_input("Skip (optional):")
		try:
			print access.listtransactions(acct, count, frm)
		except:
			print access.listtransactions()
	except:
		print "\n---An error occurred---\n"
elif cmd == "move":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.move(frm, to, amt, mc, comment)
		except:
			print access.move(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendfrom":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendfrom(frm, to, amt, mc, comment, commentto)
		except:
			print access.sendfrom(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendmany":
	try:
		frm = raw_input("From: ")
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.sendmany(frm,to,mc,comment)
		except:
			print access.sendmany(frm,to)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
	try:
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		amt = raw_input("Amount:")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendtoaddress(to,amt,comment,commentto)
		except:
			print access.sendtoaddress(to,amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setaccount":
	try:
		addr = raw_input("Address: ")
		acct = raw_input("Account:")
		print access.setaccount(addr,acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setgenerate":
	try:
		gen= raw_input("Generate? (true/false): ")
		cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
		try:
			print access.setgenerate(gen, cpus)
		except:
			print access.setgenerate(gen)
	except:
		print "\n---An error occurred---\n"
elif cmd == "settxfee":
	try:
		amt = raw_input("Amount:")
		print access.settxfee(amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "stop":
	try:
		print access.stop()
	except:
		print "\n---An error occurred---\n"
elif cmd == "validateaddress":
	try:
		addr = raw_input("Address: ")
		print access.validateaddress(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
	try:
		pwd = raw_input("Enter wallet passphrase: ")
		# Unlocks the wallet for 60 seconds.
		access.walletpassphrase(pwd, 60)
		print "\n---Wallet unlocked---\n"
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
	try:
		pwd = raw_input("Enter old wallet passphrase: ")
		pwd2 = raw_input("Enter new wallet passphrase: ")
		access.walletpassphrasechange(pwd, pwd2)
		print
		print "\n---Passphrase changed---\n"
	except:
		print
		print "\n---An error occurred---\n"
		print
else:
	print "Command not found or not supported"
import linecache
import os
import sys
from bson import ObjectId
from ereuse_utils.nested_lookup import NestedLookup
from flask import Config, json
from flask_caching import Cache
cache = Cache(config={'CACHE_TYPE': 'simple'})
def get_last_exception_info():
    """Format a one-line summary of the exception currently being handled.

    Must be called from inside an ``except`` block; returns a string with
    the file, line number, source line and exception value of the most
    recent exception.
    """
    _exc_type, exc_value, trace = sys.exc_info()
    frame = trace.tb_frame
    line_no = trace.tb_lineno
    src_file = frame.f_code.co_filename
    linecache.checkcache(src_file)
    src_line = linecache.getline(src_file, line_no, frame.f_globals)
    return 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(
        src_file, line_no, src_line.strip(), exc_value)
def coerce_type(fields: dict):
    """
    Similar to a Cerberus' coercion, adds a prefix to all @types accordingly.
    :param fields: the resource (ex-JSON document) to coerce. The variable is updated.
    """
    # todo this method should be general: obtaining which resources need to be prefixed from their schema
    # Imported locally to avoid a circular import at module load time.
    from ereuse_devicehub.resources.event.device import DeviceEventDomain
    # NestedLookup fills `references` with (document, key) pairs for every
    # '@type' key found anywhere in the nested structure.
    references = []
    NestedLookup(fields, references, NestedLookup.key_equality_factory('@type'))
    for document, ref_key in references:
        document[ref_key] = DeviceEventDomain.add_prefix(document[ref_key])
def get_header_link(resource_type: str) -> ():
    """Build the ('Link', value) HTTP header tuple that advertises the
    JSON-LD context document for *resource_type*."""
    context_url = ('<http://www.ereuse.org/onthology/' + resource_type +
                   '.jsonld>; rel="http://www.w3.org/ns/json-ld#context"; '
                   'type="application/ld+json"')
    return 'Link', context_url
def get_json_from_file(filename: str, directory: str = None, parse_json=True, mode='r',
                       same_directory_as_file: str = None) -> dict:
    """Load a file's contents, optionally parsing them as JSON.

    :param filename: name of the file to read.
    :param directory: directory to read from; ignored when
        *same_directory_as_file* is supplied.
    :param parse_json: parse the contents as JSON (default) or return the
        raw string.
    :param mode: file opening mode; read-only by default.
    :param same_directory_as_file: when supplied, read from the directory
        containing this file (overrides *directory*).
    :return: the parsed JSON dict, or the raw file contents.
    """
    if same_directory_as_file:
        directory = os.path.dirname(os.path.realpath(same_directory_as_file))
    full_path = os.path.abspath(os.path.join(directory, filename))
    with open(full_path, mode=mode) as data_file:
        return json.load(data_file) if parse_json else data_file.read()
class DeviceHubConfig(Config):
    """Configuration class for DeviceHub. We only extend it to add our settings when eve loads its settings."""
    def from_object(self, obj):
        """Load *obj*'s settings, chaining DeviceHub defaults after Eve's."""
        super().from_object(obj)  # 1. Load settings as normal
        if obj == 'eve.default_settings':
            super().from_object('ereuse_devicehub.default_settings')  # 2. If those were eve's, then load ours
def url_for_resource(resource_name: str, _id: str or ObjectId, db: str = None, base_url: str = None) -> str:
    """Build the absolute URL of a resource: <base>/<db>/<resource-url>/<id>.

    Defaults: *db* falls back to the database of the current request and
    *base_url* to the app's BASE_URL_FOR_AGENTS setting.
    """
    # Local imports avoid circular dependencies at module load time.
    from ereuse_devicehub.resources.account.domain import AccountDomain
    from flask import current_app
    db = db or AccountDomain.requested_database
    base_url = base_url or current_app.config['BASE_URL_FOR_AGENTS']
    return '{}/{}/{}/{}'.format(base_url, db, current_app.config['DOMAIN'][resource_name]['url'], _id)
| eReuse/DeviceHub | ereuse_devicehub/utils.py | Python | agpl-3.0 | 3,149 |
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
from __future__ import absolute_import, print_function
import os
import sys
import platform
import warnings
from contextlib import contextmanager
import subprocess
import numpy as np
from numpy import (
array, arange, empty, zeros, int32, int64, uint16, complex_, float64, rec,
copy, ones_like, where, alltrue, linspace,
sum, prod, sqrt, fmod, floor, ceil,
sin, cos, tan, arcsin, arccos, arctan, arctan2,
sinh, cosh, tanh, arcsinh, arccosh, arctanh,
log, log1p, log10, exp, expm1, conj)
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from numpy import shape, allclose, array_equal, ravel, isnan, isinf
import numexpr
from numexpr import E, NumExpr, evaluate, re_evaluate, disassemble, use_vml
import unittest
TestCase = unittest.TestCase  # Short alias used by all test classes below.
double = np.double
# Python 3 has no separate `long` type; alias it so the integer tests run.
if sys.version_info[0] >= 3:
    long = int
# Recommended minimum versions
from distutils.version import LooseVersion
minimum_numpy_version = LooseVersion('1.7.0')
class test_numexpr(TestCase):
    """Testing with 1 thread"""
    nthreads = 1  # Subclasses override this to exercise the threaded paths.
    def setUp(self):
        # Pin the worker-thread count so each subclass tests a fixed setting.
        numexpr.set_num_threads(self.nthreads)
    def test_simple(self):
        # Compile a three-operand expression explicitly and check the result.
        ex = 2.0 * E.a + 3.0 * E.b * E.c
        sig = [('a', double), ('b', double), ('c', double)]
        func = NumExpr(ex, signature=sig)
        x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9]))
        assert_array_equal(x, array([86., 124., 168.]))
    def test_simple_expr_small_array(self):
        # Identity expression on an array smaller than one VM block.
        func = NumExpr(E.a)
        x = arange(100.0)
        y = func(x)
        assert_array_equal(x, y)
    def test_simple_expr(self):
        # Identity expression on a large array (multiple VM blocks).
        func = NumExpr(E.a)
        x = arange(1e6)
        y = func(x)
        assert_array_equal(x, y)
    def test_rational_expr(self):
        # A rational function; compared against the NumPy evaluation.
        func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b))
        a = arange(1e6)
        b = arange(1e6) * 0.1
        x = (a + 2 * b) / (1 + a + 4 * b * b)
        y = func(a, b)
        assert_array_almost_equal(x, y)
    def test_reductions(self):
        # Covers sum/prod/min/max reductions: bytecode generation, full and
        # per-axis reductions, and all supported dtypes (float, int, long,
        # complex).
        # Check that they compile OK.
        assert_equal(disassemble(
            NumExpr("sum(x**2+2, axis=None)", [('x', double)])),
                     [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
                      (b'add_ddd', b't3', b't3', b'c2[2.0]'),
                      (b'sum_ddn', b'r0', b't3', None)])
        assert_equal(disassemble(
            NumExpr("sum(x**2+2, axis=1)", [('x', double)])),
                     [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
                      (b'add_ddd', b't3', b't3', b'c2[2.0]'),
                      (b'sum_ddn', b'r0', b't3', 1)])
        assert_equal(disassemble(
            NumExpr("prod(x**2+2, axis=2)", [('x', double)])),
                     [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
                      (b'add_ddd', b't3', b't3', b'c2[2.0]'),
                      (b'prod_ddn', b'r0', b't3', 2)])
        # Check that full reductions work.
        x = zeros(100000) + .01  # checks issue #41
        assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None))
        assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0))
        assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0))
        assert_allclose(evaluate("min(x)"), np.min(x))
        assert_allclose(evaluate("max(x,axis=0)"), np.max(x, axis=0))
        # Fix for #277, array with leading singleton dimension
        x = np.arange(10).reshape(1,10)
        assert_allclose(evaluate("sum(x,axis=None)"), sum(x, axis=None) )
        assert_allclose(evaluate("sum(x,axis=0)"), sum(x, axis=0) )
        assert_allclose(evaluate("sum(x,axis=1)"), sum(x, axis=1) )
        x = arange(10.0)
        assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
        x = arange(100.0)
        assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
        assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0))
        assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0))
        x = linspace(0.1, 1.0, 2000)
        assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
        assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0))
        assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0))
        # Check that reductions along an axis work
        y = arange(9.0).reshape(3, 3)
        assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1))
        assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0))
        assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None))
        assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1))
        assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0))
        assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None))
        assert_allclose(evaluate("min(y**2, axis=1)"), np.min(y ** 2, axis=1))
        assert_allclose(evaluate("min(y**2, axis=0)"), np.min(y ** 2, axis=0))
        assert_allclose(evaluate("min(y**2, axis=None)"), np.min(y ** 2, axis=None))
        assert_allclose(evaluate("max(y**2, axis=1)"), np.max(y ** 2, axis=1))
        assert_allclose(evaluate("max(y**2, axis=0)"), np.max(y ** 2, axis=0))
        assert_allclose(evaluate("max(y**2, axis=None)"), np.max(y ** 2, axis=None))
        # Check integers
        x = arange(10.)
        x = x.astype(int)
        assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
        # Check longs
        x = x.astype(long)
        assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0))
        # Check complex
        x = x + .1j
        assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
        assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
    def test_in_place(self):
        # Writing the result back into an input operand via `out=` must work.
        x = arange(10000.).reshape(1000, 10)
        evaluate("x + 3", out=x)
        assert_equal(x, arange(10000.).reshape(1000, 10) + 3)
        # Including when the output operand also appears (broadcast) in the
        # expression itself.
        y = arange(10)
        evaluate("(x - 3) * y + (x - 3)", out=x)
        assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1))
def test_axis(self):
y = arange(9.0).reshape(3, 3)
try:
evaluate("sum(y, axis=2)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
evaluate("sum(y, axis=-3)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
# Negative axis are not supported
evaluate("sum(y, axis=-1)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
# Check that the virtual machine reuses register r0 for intermediate
# results instead of allocating a fresh register per operation.
def test_r0_reuse(self):
assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])),
[(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'),
(b'add_ddd', b'r0', b'r0', b'c2[2.0]')])
# contains() with two scalar byte strings returns a scalar bool.
def test_str_contains_basic0(self):
res = evaluate('contains(b"abc", b"ab")')
assert_equal(res, True)
# contains() with an array haystack and a scalar needle broadcasts the needle.
def test_str_contains_basic1(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(haystack, b"ab")')
assert_equal(res, [True, False, False, False, False])
# contains() with a scalar haystack and an array of needles broadcasts the haystack.
def test_str_contains_basic2(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(b"abcd", haystack)')
assert_equal(res, [True, False, False, False, False])
# contains() applied element-wise to two same-length arrays.
def test_str_contains_basic3(self):
haystacks = array(
[b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc'])
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab'])
res = evaluate('contains(haystacks, needles)')
assert_equal(res, [True, True, False, False, False, True])
# Needles containing spaces: matching must be an exact substring match,
# including any embedded/leading/trailing whitespace.
def test_str_contains_basic4(self):
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc',
b'abc '])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, False, False, False, False, True, True])
# Whitespace variants: tab is not a space, and substrings may straddle words.
def test_str_contains_basic5(self):
needles = array(
[b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h'])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, True, True, False, True])
# Compare operation of Python 'in' operator with 'contains' using a
# product of two lists of strings.
def test_str_contains_listproduct(self):
from itertools import product
# Two fixed corpora of text fragments; every (small, big) pair is
# checked both with Python's 'in' and with numexpr's contains().
small = [
'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting',
' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a',
'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it',
' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ',
'it mut', 'terin', 'g to ', 'its', 'elf ', "'The",
' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws',
'! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g",
'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets',
' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d',
'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice',
' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ',
'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai',
'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ',
'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing',
' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh',
'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ',
'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in',
' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith',
' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle',
' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.']
big = [
'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro',
'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin',
'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ',
'it had', ' los', 't ', 'so', 'mething', '; and', ' she h',
'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ",
'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de',
'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'",
'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ',
'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha',
've dro', 'pped t', 'hem', ', I ', 'won', "der?' A",
'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i',
't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p',
'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ',
'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou',
't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere',
' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ',
'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ',
'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi',
'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,',
' had va', 'ni', 'shed co', 'mpletel', 'y.']
p = list(product(small, big))
# Reference result from the Python 'in' operator.
python_in = [x[0] in x[1] for x in p]
# contains() works on byte strings, so encode both columns.
a = [x[0].encode() for x in p]
b = [x[1].encode() for x in p]
res = [bool(x) for x in evaluate('contains(b, a)')]
assert_equal(res, python_in)
# The empty string is a substring of everything, so the b'' needle matches.
def test_str_contains_withemptystr1(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(b"abcd", withemptystr)')
assert_equal(res, [True, False, True])
# A b'' needle matches every haystack, including the empty one.
def test_str_contains_withemptystr2(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(withemptystr, b"")')
assert_equal(res, [True, True, True])
# A needle longer than typical fast-path buffers must still be found.
def test_str_contains_long_needle(self):
a = b'1' + b'a' * 40
b = b'a' * 40
res = evaluate('contains(a, b)')
assert_equal(res, True)
# where() with a scalar boolean condition selects one whole branch array.
def test_where_scalar_bool(self):
a = True
b = array([1, 2])
c = array([3, 4])
res = evaluate('where(a, b, c)')
assert_array_equal(res, b)
a = False
res = evaluate('where(a, b, c)')
assert_array_equal(res, c)
# evaluate() must not leak references to unrelated caller-frame objects.
def test_refcount(self):
# Regression test for issue #310
a = array([1])
assert sys.getrefcount(a) == 2
evaluate('1')
assert sys.getrefcount(a) == 2
# evaluate() must not wipe the caller's globals when clearing its view of
# frame locals; run in a subprocess so the code executes in the top frame.
def test_locals_clears_globals(self):
# Check for issue #313, whereby clearing f_locals also clear f_globals
# if in the top-frame. This cannot be done inside `unittest` as it is always
# executing code in a child frame.
script = r';'.join([
r"import numexpr as ne",
r"a=10",
r"ne.evaluate('1')",
r"a += 1",
r"ne.evaluate('2', local_dict={})",
r"a += 1",
r"ne.evaluate('3', global_dict={})",
r"a += 1",
r"ne.evaluate('4', local_dict={}, global_dict={})",
r"a += 1",
])
# Raises CalledProcessError on a non-normal exit
check = subprocess.check_call([sys.executable, '-c', script])
# Ideally this test should also be done against ipython but it's not
# a requirement.
# Re-run the full test_numexpr suite with the thread pool set to 2 threads.
class test_numexpr2(test_numexpr):
"""Testing with 2 threads"""
nthreads = 2
# End-to-end tests for the public evaluate()/re_evaluate() API.
class test_evaluate(TestCase):
def test_simple(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c")
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
x = arange(100.0)
y = evaluate("x")
assert_array_equal(x, y)
def test_simple_expr(self):
x = arange(1e6)
y = evaluate("x")
assert_array_equal(x, y)
# re_evaluate() re-runs the most recent expression with fresh operand values.
def test_re_evaluate(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c")
x = re_evaluate()
assert_array_equal(x, array([86., 124., 168.]))
def test_re_evaluate_dict(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c", local_dict={'a': a, 'b': b, 'c': c})
x = re_evaluate()
assert_array_equal(x, array([86., 124., 168.]))
# Test for issue #37
if sys.version_info[0] < 3:
# In python 3 '/' performs true division, not integer division.
# Integer division '//' is still not supported by numexpr
def test_zero_div(self):
x = arange(100, dtype='i4')
y = evaluate("1/x")
x2 = zeros(100, dtype='i4')
x2[1] = 1
assert_array_equal(x2, y)
# Test for issue #22
def test_true_div(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x/2"), x / 2)
assert_array_equal(evaluate("x/2", truediv=False), x / 2)
assert_array_equal(evaluate("x/2", truediv='auto'), x / 2)
assert_array_equal(evaluate("x/2", truediv=True), x / 2.0)
def test_left_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x<<2"), x << 2)
def test_right_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x>>2"), x >> 2)
# PyTables uses __nonzero__ among ExpressionNode objects internally
# so this should be commented out for the moment. See #24.
def test_boolean_operator(self):
x = arange(10, dtype='i4')
try:
evaluate("(x > 1) and (x < 9)")
except TypeError:
pass
else:
raise ValueError("should raise exception!")
def test_rational_expr(self):
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = evaluate("(a + 2*b) / (1 + a + 4*b*b)")
assert_array_almost_equal(x, y)
def test_complex_expr(self):
# Local helper shadowing builtin complex(): builds a complex array
# from separate real and imaginary parts.
def complex(a, b):
c = zeros(a.shape, dtype=complex_)
c.real = a
c.imag = b
return c
a = arange(1e4)
b = arange(1e4) ** 1e-5
z = a + 1j * b
x = z.imag
x = sin(complex(a, b)).real + z.imag
y = evaluate("sin(complex(a, b)).real + z.imag")
assert_array_almost_equal(x, y)
def test_complex_strides(self):
# Strided and record-field operands must be handled correctly.
a = arange(100).reshape(10, 10)[::2]
b = arange(50).reshape(5, 10)
assert_array_equal(evaluate("a+b"), a + b)
c = empty([10], dtype=[('c1', int32), ('c2', uint16)])
c['c1'] = arange(10)
c['c2'].fill(0xaaaa)
c1 = c['c1']
a0 = a[0]
assert_array_equal(evaluate("c1"), c1)
assert_array_equal(evaluate("a0+c1"), a0 + c1)
def test_recarray_strides(self):
a = arange(100)
b = arange(100,200)
recarr = np.rec.array(None, formats='f4,f4', shape=(100,))
recarr['f0'] = a
recarr['f1'] = b
c = recarr['f1']
assert_array_almost_equal(evaluate("sqrt(c) > 1."), sqrt(c) > 1.)
assert_array_almost_equal(evaluate("log10(c)"), log10(c))
def test_broadcasting(self):
a = arange(100).reshape(10, 10)[::2]
c = arange(10)
d = arange(5).reshape(5, 1)
assert_array_equal(evaluate("a+c"), a + c)
assert_array_equal(evaluate("a+d"), a + d)
expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)])
assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c)
def test_all_scalar(self):
a = 3.
b = 4.
assert_allclose(evaluate("a+b"), a + b)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_equal(expr(a, b), 2 * a + 3 * b)
def test_run(self):
# Calling a compiled NumExpr and using its .run() method are equivalent.
a = arange(100).reshape(10, 10)[::2]
b = arange(10)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_array_equal(expr(a, b), expr.run(a, b))
def test_illegal_value(self):
a = arange(3)
try:
evaluate("a < [0, 0, 0]")
except TypeError:
pass
else:
self.fail()
def test_ex_uses_vml(self):
# Each of these transcendental functions should be routed to VML
# when a VML-enabled build is in use.
vml_funcs = [ "sin", "cos", "tan", "arcsin", "arccos", "arctan",
"sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh",
"log", "log1p","log10", "exp", "expm1", "abs", "conj",
"arctan2", "fmod"]
for func in vml_funcs:
strexpr = func+'(a)'
_, ex_uses_vml = numexpr.necompiler.getExprNames(strexpr, {})
assert_equal(ex_uses_vml, use_vml, strexpr)
if 'sparc' not in platform.machine():
# Execution order set here so as to not use too many threads
# during the rest of the execution. See #33 for details.
def test_changing_nthreads_00_inc(self):
a = linspace(-1, 1, 1000000)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(1, 7):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
def test_changing_nthreads_01_dec(self):
a = linspace(-1, 1, 1000000)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(6, 1, -1):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
# Module-level catalogue of expression strings, grouped by section.
# test_expressions() below evaluates each one with numpy and numexpr
# and compares the results across dtypes and optimization levels.
tests = [
('MISC', ['b*c+d*e',
'2*a+3*b',
'-a',
'sinh(a)',
'2*a + (cos(3)+5)*sinh(cos(b))',
'2*a + arctan2(a, b)',
'arcsin(0.5)',
'where(a != 0.0, 2, a)',
'where(a > 10, b < a, b > a)',
'where((a-10).real != 0.0, a, 2)',
'0.25 * (a < 5) + 0.33 * (a >= 5)',
'cos(1+1)',
'1+1',
'1',
'cos(a2)',
])]
# Binary arithmetic operators against arrays and scalar constants.
optests = []
for op in list('+-*/%') + ['**']:
optests.append("(a+1) %s (b+3)" % op)
optests.append("3 %s (b+3)" % op)
optests.append("(a+1) %s 4" % op)
optests.append("2 %s (b+3)" % op)
optests.append("(a+1) %s 2" % op)
optests.append("(a+1) %s -1" % op)
optests.append("(a+1) %s 0.5" % op)
# Check divisions and modulus by zero (see ticket #107)
optests.append("(a+1) %s 0" % op)
tests.append(('OPERATIONS', optests))
# Comparison operators.
cmptests = []
for op in ['<', '<=', '==', '>=', '>', '!=']:
cmptests.append("a/2+5 %s b" % op)
cmptests.append("a/2+5 %s 7" % op)
cmptests.append("7 %s b" % op)
cmptests.append("7.0 %s 5" % op)
tests.append(('COMPARISONS', cmptests))
# One-argument functions.
func1tests = []
for func in ['copy', 'ones_like', 'sqrt',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj',
'ceil', 'floor']:
func1tests.append("a + %s(b+c)" % func)
tests.append(('1_ARG_FUNCS', func1tests))
# Two-argument functions, with array and scalar second arguments.
func2tests = []
for func in ['arctan2', 'fmod']:
func2tests.append("a + %s(b+c, d+1)" % func)
func2tests.append("a + %s(b+c, 1)" % func)
func2tests.append("a + %s(1, d+1)" % func)
tests.append(('2_ARG_FUNCS', func2tests))
# Power operator with a spread of exponents.
powtests = []
# n = -1, 0.5, 2, 4 already handled in section "OPERATIONS"
for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3):
powtests.append("(a+1)**%s" % n)
tests.append(('POW_TESTS', powtests))
def equal(a, b, exact):
    """Compare two results `a` and `b` for (near-)equality.

    With ``exact`` True, shapes and every element must match; otherwise
    ``allclose`` is used with a dtype-dependent tolerance.  Results that
    contain NaNs or Infs are compared only by their NaN/Inf counts, since an
    element-wise comparison in Python space would be very expensive.
    """
    if array_equal(a, b):
        return True

    if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']:
        nnans = isnan(a).sum()
        if nnans > 0:
            # For results containing NaNs, just check that the number
            # of NaNs is the same in both arrays. This check could be
            # made more exhaustive, but checking element by element in
            # python space is very expensive in general.
            return nnans == isnan(b).sum()
        ninfs = isinf(a).sum()
        if ninfs > 0:
            # Ditto for Inf's
            return ninfs == isinf(b).sum()
    if exact:
        # np.alltrue was deprecated in NumPy 1.25 and removed in 2.0;
        # the ndarray.all() method is the supported equivalent.
        return (shape(a) == shape(b)) and bool((ravel(a) == ravel(b)).all())
    else:
        if hasattr(a, 'dtype') and a.dtype == 'f4':
            atol = 1e-5  # Relax precision for special opcodes, like fmod
        else:
            atol = 1e-8
        return (shape(a) == shape(b) and
                allclose(ravel(a), ravel(b), atol=atol))
class Skip(Exception): pass
# Generator yielding one test method per (expression, dtype, scalar-mode,
# optimization-level) combination; suite() attaches them to a TestCase.
def test_expressions():
test_no = [0]
def make_test_method(a, a2, b, c, d, e, x, expr,
test_scalar, dtype, optimization, exact, section):
this_locals = locals()
def method():
try:
# We don't want to listen at RuntimeWarnings like
# "overflows" or "divide by zero" in plain eval().
warnings.simplefilter("ignore")
npval = eval(expr, globals(), this_locals)
warnings.simplefilter("always")
npval = eval(expr, globals(), this_locals)
except Exception as ex:
# just store the exception in a variable
# compatibility with numpy v1.12
# see also https://github.com/pydata/numexpr/issues/239
np_exception = ex
npval = None
else:
np_exception = None
try:
neval = evaluate(expr, local_dict=this_locals,
optimization=optimization)
except AssertionError:
raise
except NotImplementedError:
print('%r not implemented for %s (scalar=%d, opt=%s)'
% (expr, dtype.__name__, test_scalar, optimization))
except Exception as ne_exception:
# numexpr must fail with (a subclass of) the same exception
# type that plain numpy evaluation raised.
same_exc_type = issubclass(type(ne_exception),
type(np_exception))
if np_exception is None or not same_exc_type:
print('numexpr error for expression %r' % (expr,))
raise
except:
print('numexpr error for expression %r' % (expr,))
raise
else:
msg = ('expected numexpr error not raised for expression '
'%r' % (expr,))
assert np_exception is None, msg
assert equal(npval, neval, exact), """%r
(test_scalar=%r, dtype=%r, optimization=%r, exact=%r,
npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__,
optimization, exact,
npval, type(npval), shape(npval),
neval, type(neval), shape(neval))
method.description = ('test_expressions(%s, test_scalar=%r, '
'dtype=%r, optimization=%r, exact=%r)') % (expr, test_scalar, dtype.__name__, optimization, exact)
test_no[0] += 1
method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar,
dtype.__name__,
optimization.encode('ascii'),
section.encode('ascii'),
test_no[0])
return method
x = None
# test_scalar: 0 = all arrays, 1 = scalar 'a', 2 = scalar 'b'.
for test_scalar in (0, 1, 2):
for dtype in (int, long, np.float32, double, complex):
array_size = 100
a = arange(2 * array_size, dtype=dtype)[::2]
a2 = zeros([array_size, array_size], dtype=dtype)
b = arange(array_size, dtype=dtype) / array_size
c = arange(array_size, dtype=dtype)
d = arange(array_size, dtype=dtype)
e = arange(array_size, dtype=dtype)
if dtype == complex:
a = a.real
for x in [a2, b, c, d, e]:
x += 1j
x *= 1 + 1j
if test_scalar == 1:
a = a[array_size // 2]
if test_scalar == 2:
b = b[array_size // 2]
for optimization, exact in [
('none', False), ('moderate', False), ('aggressive', False)]:
for section_name, section_tests in tests:
for expr in section_tests:
if (dtype == complex and
('<' in expr or '>' in expr or '%' in expr
or "arctan2" in expr or "fmod" in expr
or "floor" in expr or "ceil" in expr)):
# skip complex comparisons or functions not
# defined in complex domain.
continue
if (dtype in (int, long) and test_scalar and
expr == '(a+1) ** -1'):
continue
m = make_test_method(a, a2, b, c, d, e, x,
expr, test_scalar, dtype,
optimization, exact,
section_name)
yield m
class test_int64(TestCase):
    """Negation of int64 operands must keep the int64 dtype and wrap correctly."""

    def test_neg(self):
        a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64)
        res = evaluate('-a')
        expected = [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63]
        assert_array_equal(res, expected)
        self.assertEqual(res.dtype.name, 'int64')
class test_int32_int64(TestCase):
    """Type-promotion rules between 32-bit and 64-bit integers."""

    # BUG FIX: the guard was ``sys.version_info[0] < 2``, which is never
    # true, silently disabling test_small_long everywhere.  The intent
    # (per the comment) is "Python 2 only", i.e. ``< 3``: the ``42L``
    # long-literal syntax does not exist in Python 3.
    if sys.version_info[0] < 3:
        # no long literals in python 3
        def test_small_long(self):
            # Small longs should not be downgraded to ints.
            res = evaluate('42L')
            assert_array_equal(res, 42)
            self.assertEqual(res.dtype.name, 'int64')

    def test_small_int(self):
        # Small ints (32-bit ones) should not be promoted to longs.
        res = evaluate('2')
        assert_array_equal(res, 2)
        self.assertEqual(res.dtype.name, 'int32')

    def test_big_int(self):
        # Big ints should be promoted to longs.
        res = evaluate('2**40')
        assert_array_equal(res, 2 ** 40)
        self.assertEqual(res.dtype.name, 'int64')

    def test_long_constant_promotion(self):
        # An int64 scalar operand promotes the whole result to int64.
        int32array = arange(100, dtype='int32')
        itwo = np.int32(2)
        ltwo = np.int64(2)
        res = int32array * 2
        res32 = evaluate('int32array * itwo')
        res64 = evaluate('int32array * ltwo')
        assert_array_equal(res, res32)
        assert_array_equal(res, res64)
        self.assertEqual(res32.dtype.name, 'int32')
        self.assertEqual(res64.dtype.name, 'int64')

    def test_int64_array_promotion(self):
        # Mixing int32 and int64 arrays yields an int64 result.
        int32array = arange(100, dtype='int32')
        int64array = arange(100, dtype='int64')
        respy = int32array * int64array
        resnx = evaluate('int32array * int64array')
        assert_array_equal(respy, resnx)
        self.assertEqual(resnx.dtype.name, 'int64')
# Type-promotion rules for unsigned 32-bit integers: uint32 operands are
# promoted to int64 so that all their values remain representable.
class test_uint32_int64(TestCase):
def test_small_uint32(self):
# Small uint32 should not be downgraded to ints.
a = np.uint32(42)
res = evaluate('a')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_uint32_constant_promotion(self):
# A uint32 scalar promotes the result to int64; int32 keeps int32.
int32array = arange(100, dtype='int32')
stwo = np.int32(2)
utwo = np.uint32(2)
res = int32array * utwo
res32 = evaluate('int32array * stwo')
res64 = evaluate('int32array * utwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
# Mixing uint32 and int64 arrays yields an int64 result.
uint32array = arange(100, dtype='uint32')
int64array = arange(100, dtype='int64')
respy = uint32array * int64array
resnx = evaluate('uint32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_strings(TestCase):
    """Tests for byte-string operands: comparisons, contains(), null bytes."""

    BLOCK_SIZE1 = 128
    BLOCK_SIZE2 = 8
    str_list1 = [b'foo', b'bar', b'', b' ']
    str_list2 = [b'foo', b'', b'x', b' ']
    # Repeat the fixtures enough times to span several VM blocks.
    str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1)
    str_array1 = array(str_list1 * str_nloops)
    str_array2 = array(str_list2 * str_nloops)
    str_constant = b'doodoo'

    def test_null_chars(self):
        # Embedded and trailing NUL bytes must round-trip unchanged.
        str_list = [
            b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0',
            b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0']
        for s in str_list:
            r = evaluate('s')
            # ndarray.tostring() was deprecated (and removed in NumPy 2.0);
            # tobytes() is the identical replacement.
            self.assertEqual(s, r.tobytes())  # check *all* stored data

    def test_compare_copy(self):
        sarr = self.str_array1
        expr = 'sarr'
        res1 = eval(expr)
        res2 = evaluate(expr)
        assert_array_equal(res1, res2)

    def test_compare_array(self):
        sarr1 = self.str_array1
        sarr2 = self.str_array2
        expr = 'sarr1 >= sarr2'
        res1 = eval(expr)
        res2 = evaluate(expr)
        assert_array_equal(res1, res2)

    def test_compare_variable(self):
        sarr = self.str_array1
        svar = self.str_constant
        expr = 'sarr >= svar'
        res1 = eval(expr)
        res2 = evaluate(expr)
        assert_array_equal(res1, res2)

    def test_compare_constant(self):
        sarr = self.str_array1
        expr = 'sarr >= %r' % self.str_constant
        res1 = eval(expr)
        res2 = evaluate(expr)
        assert_array_equal(res1, res2)

    def test_add_string_array(self):
        # String concatenation is not implemented; a clear error is expected.
        sarr1 = self.str_array1
        sarr2 = self.str_array2
        expr = 'sarr1 + sarr2'
        self.assert_missing_op('add_sss', expr, locals())

    def test_empty_string1(self):
        a = np.array([b"", b"pepe"])
        b = np.array([b"pepe2", b""])
        res = evaluate("(a == b'') & (b == b'pepe2')")
        assert_array_equal(res, np.array([True, False]))
        res2 = evaluate("(a == b'pepe') & (b == b'')")
        assert_array_equal(res2, np.array([False, True]))

    def test_empty_string2(self):
        a = np.array([b"p", b"pepe"])
        b = np.array([b"pepe2", b""])
        res = evaluate("(a == b'') & (b == b'pepe2')")
        assert_array_equal(res, np.array([False, False]))
        res2 = evaluate("(a == b'pepe') & (b == b'')")
        # BUG FIX: the second assertion re-checked ``res`` instead of
        # ``res2``, so the second expression's result was never verified.
        assert_array_equal(res2, np.array([False, False]))

    def test_add_numeric_array(self):
        # Mixing strings with numeric operands is not implemented.
        sarr = self.str_array1
        narr = arange(len(sarr), dtype='int32')
        expr = 'sarr >= narr'
        self.assert_missing_op('ge_bsi', expr, locals())

    def assert_missing_op(self, op, expr, local_dict):
        """Assert that evaluating `expr` raises NotImplementedError naming `op`."""
        msg = "expected NotImplementedError regarding '%s'" % op
        try:
            evaluate(expr, local_dict)
        except NotImplementedError as nie:
            if "'%s'" % op not in nie.args[0]:
                self.fail(msg)
        else:
            self.fail(msg)

    def test_compare_prefix(self):
        # Check comparing two strings where one is a prefix of the
        # other.
        for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'),
                       (b'foo\0a', b'foo\0bar')]:
            self.assertTrue(evaluate('s1 < s2'))
            self.assertTrue(evaluate('s1 <= s2'))
            self.assertTrue(evaluate('~(s1 == s2)'))
            self.assertTrue(evaluate('~(s1 >= s2)'))
            self.assertTrue(evaluate('~(s1 > s2)'))

        # Check for NumPy array-style semantics in string equality.
        s1, s2 = b'foo', b'foo\0\0'
        self.assertTrue(evaluate('s1 == s2'))
# Case for testing selections in fields which are aligned but whose
# data length is not an exact multiple of the length of the record.
# The following test exposes the problem only in 32-bit machines,
# because in 64-bit machines 'c2' is unaligned. However, this should
# check most platforms where, while not unaligned, 'len(datatype) >
# boundary_alignment' is fullfilled.
# Selections on record-array fields whose strides are not a multiple of the
# item size must still evaluate correctly (see the comment block above).
class test_irregular_stride(TestCase):
def test_select(self):
f0 = arange(10, dtype=int32)
f1 = arange(10, dtype=float64)
irregular = rec.fromarrays([f0, f1])
f0 = irregular['f0']
f1 = irregular['f1']
i0 = evaluate('f0 < 5')
i1 = evaluate('f1 < 5')
assert_array_equal(f0[i0], arange(5, dtype=int32))
assert_array_equal(f1[i1], arange(5, dtype=float64))
# Cases for testing arrays with dimensions that can be zero.
# Arrays with a zero-length dimension must evaluate to empty results
# of the promoted dtype rather than raising.
class test_zerodim(TestCase):
def test_zerodim1d(self):
a0 = array([], dtype=int32)
a1 = array([], dtype=float64)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
def test_zerodim3d(self):
a0 = array([], dtype=int32).reshape(0, 2, 4)
a1 = array([], dtype=float64).reshape(0, 2, 4)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
@contextmanager
def _environment(key, value):
old = os.environ.get(key)
os.environ[key] = value
try:
yield
finally:
if old:
os.environ[key] = old
else:
del os.environ[key]
# Test cases for the threading configuration
class test_threading_config(TestCase):
    """Tests for thread-pool sizing from environment variables."""

    def test_max_threads_unset(self):
        # Has to be done in a subprocess as `importlib.reload` doesn't let us
        # re-initialize the threadpool
        script = '\n'.join([
            "import os",
            "if 'NUMEXPR_MAX_THREADS' in os.environ: os.environ.pop('NUMEXPR_MAX_THREADS')",
            "import numexpr",
            "assert(numexpr.nthreads <= 8)",
            "exit(0)"])
        subprocess.check_call([sys.executable, '-c', script])

    def test_max_threads_set(self):
        # Has to be done in a subprocess as `importlib.reload` doesn't let us
        # re-initialize the threadpool
        script = '\n'.join([
            "import os",
            "os.environ['NUMEXPR_MAX_THREADS'] = '4'",
            "import numexpr",
            "assert(numexpr.MAX_THREADS == 4)",
            "exit(0)"])
        subprocess.check_call([sys.executable, '-c', script])

    def test_numexpr_num_threads(self):
        # assertEquals is a deprecated alias of assertEqual (removed in
        # Python 3.12) -- use the canonical name.
        with _environment('OMP_NUM_THREADS', '5'):
            # NUMEXPR_NUM_THREADS has priority
            with _environment('NUMEXPR_NUM_THREADS', '3'):
                self.assertEqual(3, numexpr._init_num_threads())

    def test_omp_num_threads(self):
        with _environment('OMP_NUM_THREADS', '5'):
            self.assertEqual(5, numexpr._init_num_threads())
# Case test for threads
class test_threading(TestCase):
    """evaluate() must be safe to call from user-created threads."""

    def test_thread(self):
        import threading

        class ThreadTest(threading.Thread):
            def run(self):
                a = arange(3)
                assert_array_equal(evaluate('a**3'), array([0, 1, 8]))

        worker = ThreadTest()
        worker.start()
        worker.join()

    def test_multithread(self):
        import threading

        # Running evaluate() from multiple threads shouldn't crash
        def work(n):
            a = arange(n)
            evaluate('a+a')

        work(10)  # warm compilation cache

        nthreads = 30
        pool = [threading.Thread(target=work, args=(1e5,))
                for _ in range(nthreads)]
        for worker in pool:
            worker.start()
        for worker in pool:
            worker.join()
# The worker function for the subprocess (needs to be here because Windows
# has problems pickling nested functions with the multiprocess module :-/)
def _worker(qout=None):
    """Run a trivial evaluate() call; report completion on `qout` if given."""
    ra = np.arange(1e3)
    rows = evaluate('ra > 0')
    if qout is not None:
        qout.put("Done")
# Case test for subprocesses (via multiprocessing module)
# evaluate() must keep working after a fork (multiprocessing subprocess).
class test_subprocess(TestCase):
def test_multiprocess(self):
try:
import multiprocessing as mp
except ImportError:
return
# Check for two threads at least
numexpr.set_num_threads(2)
#print "**** Running from main process:"
_worker()
#print "**** Running from subprocess:"
qout = mp.Queue()
ps = mp.Process(target=_worker, args=(qout,))
ps.daemon = True
ps.start()
# Blocks until the subprocess puts its "Done" marker on the queue.
result = qout.get()
#print result
def print_versions():
"""Print the versions of software that numexpr relies on."""
# from pkg_resources import parse_version
from numexpr.cpuinfo import cpu
import platform
# NOTE(review): LooseVersion comes from distutils, which was removed in
# Python 3.12 -- this will need packaging.version eventually.
np_version = LooseVersion(np.__version__)
if np_version < minimum_numpy_version:
print('*Warning*: NumPy version is lower than recommended: %s < %s' % (np_version, minimum_numpy_version))
print('-=' * 38)
print('Numexpr version: %s' % numexpr.__version__)
print('NumPy version: %s' % np.__version__)
print('Python version: %s' % sys.version)
(sysname, nodename, release, os_version, machine, processor) = platform.uname()
print('Platform: %s-%s-%s' % (sys.platform, machine, os_version))
try:
# cpuinfo doesn't work on OSX well it seems, so protect these outputs
# with a try block
cpu_info = cpu.info[0]
print('CPU vendor: %s' % cpu_info.get('VendorIdentifier', ''))
print('CPU model: %s' % cpu_info.get('ProcessorNameString', ''))
print('CPU clock speed: %s MHz' % cpu_info.get('~MHz',''))
except KeyError:
pass
print('VML available? %s' % use_vml)
if use_vml:
print('VML/MKL version: %s' % numexpr.get_vml_version())
print('Number of threads used by default: %d '
'(out of %d detected cores)' % (numexpr.nthreads, numexpr.ncores))
print('Maximum number of threads: %s' % numexpr.MAX_THREADS)
print('-=' * 38)
def test(verbosity=1):
"""
Run all the tests in the test suite.
"""
print_versions()
# For some reason, NumPy issues all kinds of warnings when using Python3.
# Ignoring them in tests should be ok, as all results are checked out.
# See https://github.com/pydata/numexpr/issues/183 for details.
np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore')
return unittest.TextTestRunner(verbosity=verbosity).run(suite())
# Prevent pytest/nose from collecting this helper as a test case itself.
test.__test__ = False
def suite():
"""Build the full unittest suite, including generated expression tests."""
import unittest
import platform as pl
theSuite = unittest.TestSuite()
niter = 1
class TestExpressions(TestCase):
pass
# Attach each generated function from test_expressions() as a method.
def add_method(func):
def method(self):
return func()
setattr(TestExpressions, func.__name__,
method.__get__(None, TestExpressions))
for func in test_expressions():
add_method(func)
for n in range(niter):
theSuite.addTest(unittest.makeSuite(test_numexpr))
if 'sparc' not in platform.machine():
theSuite.addTest(unittest.makeSuite(test_numexpr2))
theSuite.addTest(unittest.makeSuite(test_evaluate))
theSuite.addTest(unittest.makeSuite(TestExpressions))
theSuite.addTest(unittest.makeSuite(test_int32_int64))
theSuite.addTest(unittest.makeSuite(test_uint32_int64))
theSuite.addTest(unittest.makeSuite(test_strings))
theSuite.addTest(
unittest.makeSuite(test_irregular_stride))
theSuite.addTest(unittest.makeSuite(test_zerodim))
theSuite.addTest(unittest.makeSuite(test_threading_config))
# multiprocessing module is not supported on Hurd/kFreeBSD
if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')):
theSuite.addTest(unittest.makeSuite(test_subprocess))
# I need to put this test after test_subprocess because
# if not, the test suite locks immediately before test_subprocess.
# This only happens with Windows, so I suspect of a subtle bad
# interaction with threads and subprocess :-/
theSuite.addTest(unittest.makeSuite(test_threading))
return theSuite
# Script entry point: print environment info, then run the full suite.
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
# suite = suite()
# unittest.TextTestRunner(verbosity=2).run(suite)
| robbmcleod/numexpr | numexpr/tests/test_numexpr.py | Python | mit | 44,482 |
import unittest
from politicians import solution
class TestSolution(unittest.TestCase):
    """Checks that politicians.solution finds the expected anagram candidates."""

    def test_two_names(self):
        result = solution(("Euh", "Mlb"))
        self.assertEqual(result.candidates, [("Euh", "Mlb", "humble")],
                         "'Euh' and 'Mlb' did not generate 'humble'.")

    def test_three_names(self):
        result = solution(("Euh", "Mlb", "Parsley"))
        self.assertEqual(result.candidates, [("Euh", "Mlb", "humble")],
                         "'Euh', 'Mlb' and 'Parsley' did not generate 'humble'.")

    def test_four_names(self):
        result = solution(("Euh", "Mlb", "Parsley", "Esquire"))
        self.assertEqual(result.candidates, [("Euh", "Mlb", "humble")],
                         "'Euh', 'Mlb', 'Parsley', 'Esquire' did not generate 'humble'.")

    def test_many_names(self):
        many = ('N', 'Ben', 'Frances', 'Hillary', 'Bacon', 'Newheart', 'Guoy')
        result = solution(many, verbose=True)
        self.assertEqual(result.candidates, [('Guoy', 'N', 'young')])
if __name__ == "__main__":
unittest.main()
| johnobrien/pyshortz | solutions/20150419/test_politicians.py | Python | mit | 1,067 |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shows how to load a Kubernetes config from outside of the cluster.
"""
from kubernetes import client, config
def main():
    """List every pod in the cluster using the locally configured kubeconfig."""
    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    config.load_kube_config()

    core_api = client.CoreV1Api()
    print("Listing pods with their IPs:")
    pod_list = core_api.list_pod_for_all_namespaces(watch=False)
    for pod in pod_list.items:
        print("%s\t%s\t%s" %
              (pod.status.pod_ip, pod.metadata.namespace, pod.metadata.name))
# Script entry point.
if __name__ == '__main__':
main()
| kubernetes-client/python | examples/out_of_cluster_config.py | Python | apache-2.0 | 1,204 |
import sys
import argparse
from .base import generate_word
def main():
    """CLI entry point: convert a numeric amount string into cheque words.

    Reads a single positional ``amount`` argument from sys.argv and prints
    the worded form produced by ``generate_word``.
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("amount", type=str,
                        help="Amount to convert in string")
    args = parser.parse_args(sys.argv[1:])
    # BUG FIX: ``print generate_word(...)`` is Python-2-only syntax (a
    # SyntaxError on Python 3); the parenthesized call works on both.
    print(generate_word(args.amount))
| pirsquare/chequeconvert-python | chequeconvert/main.py | Python | mit | 406 |
from httplib import NOT_FOUND, OK
import json
import os
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from opendebates.models import Submission, Vote, Flag, SiteMode
from .factories import UserFactory, VoterFactory, SubmissionFactory, RemovalFlagFactory, \
MergeFlagFactory
class ModerationTest(TestCase):
# Create three submissions, a staff superuser logged into the test client,
# and enable the recaptcha test bypass for the duration of each test.
def setUp(self):
self.first_submission = SubmissionFactory()
self.second_submission = SubmissionFactory()
self.third_submission = SubmissionFactory()
self.preview_url = reverse('moderation_preview')
self.merge_url = reverse('moderation_merge')
self.remove_url = reverse('moderation_remove')
self.moderation_home_url = reverse('moderation_home')
self.password = 'secretpassword'
self.user = UserFactory(password=self.password, is_staff=True, is_superuser=True)
self.voter = VoterFactory(user=self.user, email=self.user.email)
assert self.client.login(username=self.user.username, password=self.password)
os.environ['NORECAPTCHA_TESTING'] = 'True'
def tearDown(self):
del os.environ['NORECAPTCHA_TESTING']
def test_redirects_to_login(self):
login_url = settings.LOGIN_URL + '?next=' + self.preview_url
self.client.logout()
rsp = self.client.get(self.preview_url)
self.assertRedirects(rsp, login_url)
def test_nonsuperuser_404(self):
self.user.is_superuser = False
self.user.save()
rsp = self.client.get(self.preview_url)
self.assertEqual(NOT_FOUND, rsp.status_code)
def test_get(self):
rsp = self.client.get(self.preview_url)
self.assertContains(rsp, 'Preview Moderation')
def test_get_with_initial(self):
rsp = self.client.get(self.preview_url + "?to_remove=10")
self.assertContains(rsp, 'name="to_remove" type="number" value="10"')
def test_merge_submission(self):
self.assertEqual(self.second_submission.has_duplicates, False)
self.assertEqual(self.second_submission.duplicate_of, None)
self.assertEqual(self.second_submission.keywords, None)
self.assertEqual(self.third_submission.has_duplicates, False)
self.assertEqual(self.third_submission.duplicate_of, None)
self.assertEqual(self.third_submission.keywords, None)
rsp = self.client.post(self.merge_url, data={
"action": "merge",
"to_remove": self.third_submission.id,
"duplicate_of": self.second_submission.id,
})
self.assertRedirects(rsp, self.moderation_home_url)
merged = Submission.objects.get(id=self.third_submission.id)
remaining = Submission.objects.get(id=self.second_submission.id)
# The merged submission should now be marked as a duplicate
self.assertEqual(merged.duplicate_of, remaining)
# The remaining submission should now be marked as having duplicates
self.assertEqual(remaining.has_duplicates, True)
# The merged submission's content is copied into the remaining one's keywords
self.assertIn(merged.idea, remaining.keywords)
# Viewing the merged submission now lands visitors on the remaining submission
# with a fragment referring to the merged submission's place on the page
rsp = self.client.get(merged.get_absolute_url())
self.assertRedirects(rsp,
reverse('show_idea', args=[remaining.id]) + (
"#i%s" % merged.id))
def test_nested_merge_submission(self):
self.client.post(self.merge_url, data={
"action": "merge",
"to_remove": self.third_submission.id,
"duplicate_of": self.second_submission.id,
})
self.client.post(self.merge_url, data={
"action": "merge",
"to_remove": self.second_submission.id,
"duplicate_of": self.first_submission.id,
})
merged_deep = Submission.objects.get(id=self.third_submission.id)
merged_shallow = Submission.objects.get(id=self.second_submission.id)
remaining = Submission.objects.get(id=self.first_submission.id)
self.assertEqual(merged_deep.duplicate_of, merged_shallow)
self.assertEqual(merged_shallow.duplicate_of, remaining)
self.assertEqual(merged_shallow.has_duplicates, True)
self.assertEqual(remaining.has_duplicates, True)
# After a nested merge, the remaining submission's keywords will include
# text from all merged submissions up the chain
self.assertIn(merged_shallow.idea, remaining.keywords)
self.assertIn(merged_deep.idea, remaining.keywords)
def test_merged_votes_relocate_if_unique(self):
"During a merge, only unique votes get moved over to the remaining submission"
self.client.logout()
first_voter = VoterFactory(user=None)
second_voter = VoterFactory(user=None)
third_voter = VoterFactory(user=None)
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': first_voter.email, 'zipcode': first_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': second_voter.email, 'zipcode': second_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.second_submission.get_absolute_url(), data={
'email': first_voter.email, 'zipcode': first_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.second_submission.get_absolute_url(), data={
'email': third_voter.email, 'zipcode': third_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
# Initially each submission's vote tally will include all votes that we just cast
# plus one for the submitter
self.assertEqual(Submission.objects.get(id=self.second_submission.id).votes, 3)
self.assertEqual(Submission.objects.get(id=self.third_submission.id).votes, 3)
assert self.client.login(username=self.user.username, password=self.password)
rsp = self.client.post(self.merge_url, data={
"action": "merge",
"to_remove": self.third_submission.id,
"duplicate_of": self.second_submission.id,
})
self.assertRedirects(rsp, self.moderation_home_url)
merged = Submission.objects.get(id=self.third_submission.id)
remaining = Submission.objects.get(id=self.second_submission.id)
# The merged idea retains its original vote tally, and the remaining idea
# has a new vote tally reflecting all unique voters who have voted on either
self.assertEqual(merged.votes, 3)
self.assertEqual(remaining.votes, 4)
# The vote cast by second_voter has been re-cast for the remaining submission
# and retains a pointer to its original submission for future audits
moved_vote = Vote.objects.get(voter=second_voter)
self.assertEqual(moved_vote.submission, remaining)
self.assertEqual(moved_vote.original_merged_submission, merged)
self.assertEqual(0, Vote.objects.filter(
voter=second_voter, submission=merged).count())
# The vote cast by first_voter on the merged idea has not been re-cast,
# since first_voter had already cast a vote on the remaining submission
self.assertEqual(1, Vote.objects.filter(
voter=first_voter, submission=merged).count())
def test_local_votes_tally_updates_after_merge(self):
"During a merge, only unique votes get moved over to the remaining submission"
mode, _ = SiteMode.objects.get_or_create()
mode.debate_state = "FL"
mode.save()
self.client.logout()
nonlocal_voter = VoterFactory(user=None, state="NY")
first_local_voter = VoterFactory(user=None, state="FL")
second_local_voter = VoterFactory(user=None, state="FL")
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': nonlocal_voter.email, 'zipcode': nonlocal_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': first_local_voter.email, 'zipcode': first_local_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.second_submission.get_absolute_url(), data={
'email': first_local_voter.email, 'zipcode': first_local_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': second_local_voter.email, 'zipcode': second_local_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
self.assertEqual(Submission.objects.get(id=self.second_submission.id).local_votes,
1)
self.assertEqual(Submission.objects.get(id=self.third_submission.id).local_votes,
2)
assert self.client.login(username=self.user.username, password=self.password)
rsp = self.client.post(self.merge_url, data={
"action": "merge",
"to_remove": self.third_submission.id,
"duplicate_of": self.second_submission.id,
})
self.assertRedirects(rsp, self.moderation_home_url)
merged = Submission.objects.get(id=self.third_submission.id)
remaining = Submission.objects.get(id=self.second_submission.id)
# The merged idea retains its original vote tally, and the remaining idea
# has a new vote tally reflecting all unique local voters who have voted on either
self.assertEqual(merged.local_votes, 2)
self.assertEqual(remaining.local_votes, 2)
def test_unmoderate_does_not_merge_votes(self):
"During a duplicate unmoderate, no vote merging occurs"
self.client.logout()
first_voter = VoterFactory(user=None)
second_voter = VoterFactory(user=None)
third_voter = VoterFactory(user=None)
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': first_voter.email, 'zipcode': first_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.third_submission.get_absolute_url(), data={
'email': second_voter.email, 'zipcode': second_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.second_submission.get_absolute_url(), data={
'email': first_voter.email, 'zipcode': first_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
rsp = self.client.post(self.second_submission.get_absolute_url(), data={
'email': third_voter.email, 'zipcode': third_voter.zip,
'g-recaptcha-response': 'PASSED'
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual("200", json.loads(rsp.content)['status'])
# Initially each submission's vote tally will include all votes that we just cast
# plus one for the submitter
self.assertEqual(Submission.objects.get(id=self.second_submission.id).votes, 3)
self.assertEqual(Submission.objects.get(id=self.third_submission.id).votes, 3)
assert self.client.login(username=self.user.username, password=self.password)
rsp = self.client.post(self.merge_url, data={
"action": "unmoderate",
"to_remove": self.third_submission.id,
"duplicate_of": self.second_submission.id,
})
self.assertRedirects(rsp, self.moderation_home_url)
merged = Submission.objects.get(id=self.third_submission.id)
remaining = Submission.objects.get(id=self.second_submission.id)
# Both ideas retain their original vote tallies
self.assertEqual(merged.votes, 3)
self.assertEqual(remaining.votes, 3)
# And vote objects are untouched
moved_vote = Vote.objects.get(voter=second_voter)
self.assertEqual(moved_vote.submission, merged)
self.assertEqual(moved_vote.original_merged_submission, None)
self.assertEqual(0, Vote.objects.filter(
voter=second_voter, submission=remaining).count())
# But the duplicate submission is now unavailable & has been marked as duplicate
self.assertEqual(False, merged.approved)
self.assertEqual(remaining, merged.duplicate_of)
# Meanwhile the remaining submission has no direct record of the merge into it
self.assertEqual(False, remaining.has_duplicates)
rsp = self.client.get(merged.get_absolute_url())
self.assertEqual(NOT_FOUND, rsp.status_code)
def test_merge_link_hidden_after_merge(self):
merge_url = reverse('merge', args=[self.first_submission.pk])
rsp = self.client.get(self.first_submission.get_absolute_url())
self.assertContains(rsp, merge_url)
self.first_submission.duplicate_of = self.second_submission
self.first_submission.save()
self.second_submission.has_duplicates = True
self.second_submission.save()
rsp = self.client.get(self.first_submission.get_absolute_url(), follow=True)
self.assertNotContains(rsp, merge_url)
def test_remove_submission(self):
data = {
'to_remove': self.first_submission.pk,
'duplicate_of': '',
'action': 'remove',
}
rsp = self.client.post(self.remove_url, data=data)
self.assertRedirects(rsp, self.moderation_home_url)
refetched_sub = Submission.objects.get(pk=self.first_submission.pk)
self.assertFalse(refetched_sub.approved)
def test_reject_merge(self):
# pretend a user created a merge flag
flag = MergeFlagFactory(to_remove=self.first_submission,
duplicate_of=self.second_submission)
# now let's reject the merge
data = {
'to_remove': self.first_submission.pk,
'duplicate_of': self.second_submission.pk,
'action': 'reject',
}
rsp = self.client.post(self.merge_url, data=data)
self.assertRedirects(rsp, self.moderation_home_url)
refetched_sub1 = Submission.objects.get(pk=self.first_submission.pk)
refetched_sub2 = Submission.objects.get(pk=self.second_submission.pk)
self.assertEqual(refetched_sub1.duplicate_of, None)
self.assertEqual(refetched_sub2.has_duplicates, False)
# and Flag is now marked reviewed
refetched_flag = Flag.objects.get(pk=flag.pk)
self.assertEqual(refetched_flag.reviewed, True)
def test_preview_missing_submission(self):
data = {
'to_remove': ''
}
rsp = self.client.post(self.preview_url, data=data)
self.assertEqual(OK, rsp.status_code)
self.assertIn('This field is required', str(rsp.context['form'].errors))
def test_preview_bad_submissions(self):
data = {
'to_remove': self.first_submission.pk,
'duplicate_of': self.second_submission.pk
}
self.first_submission.delete()
self.second_submission.delete()
rsp = self.client.post(self.preview_url, data=data)
self.assertEqual(OK, rsp.status_code)
self.assertIn('submission does not exist', rsp.context['form'].errors['to_remove'][0])
self.assertIn('submission does not exist', rsp.context['form'].errors['duplicate_of'][0])
def test_preview_must_be_different_submissions(self):
data = {
'to_remove': self.first_submission.pk,
'duplicate_of': self.first_submission.pk
}
rsp = self.client.post(self.preview_url, data=data)
self.assertEqual(OK, rsp.status_code)
self.assertIn('Cannot merge a submission into itself', str(rsp.context['form'].errors))
class ModerationHomeTest(TestCase):
    """Tests for the moderation dashboard's access control and its
    flagged-for-removal / merge-flag querysets."""
    def setUp(self):
        # Logged-in superuser; the dashboard is superuser-only.
        self.password = 'secretpassword'
        self.user = UserFactory(password=self.password, is_staff=True, is_superuser=True)
        self.voter = VoterFactory(user=self.user, email=self.user.email)
        assert self.client.login(username=self.user.username, password=self.password)
        self.url = reverse('moderation_home')
    def test_redirects_to_login(self):
        """Anonymous users are redirected to login."""
        login_url = settings.LOGIN_URL + '?next=' + self.url
        self.client.logout()
        rsp = self.client.get(self.url)
        self.assertRedirects(rsp, login_url)
    def test_nonsuperuser_404(self):
        """Non-superusers get a 404, not the dashboard."""
        self.user.is_superuser = False
        self.user.save()
        rsp = self.client.get(self.url)
        self.assertEqual(NOT_FOUND, rsp.status_code)
    def test_removal_flags_queryset(self):
        """`flagged_for_removal` contains only unmoderated submissions with
        removal flags, ordered by descending flag count."""
        # these submissions should not be on our page
        submission_without_flag = SubmissionFactory()
        submission_with_merge_flag = SubmissionFactory()
        MergeFlagFactory(duplicate_of=submission_with_merge_flag)
        submission_already_moderated = SubmissionFactory(moderated_removal=True)
        RemovalFlagFactory(to_remove=submission_already_moderated)
        # these submissions should be on our page
        flagged_submission_1 = SubmissionFactory()
        RemovalFlagFactory(to_remove=flagged_submission_1)
        flagged_submission_2 = SubmissionFactory()
        RemovalFlagFactory.create_batch(to_remove=flagged_submission_2, size=2)
        rsp = self.client.get(self.url)
        self.assertEqual(OK, rsp.status_code)
        qs = rsp.context['flagged_for_removal']
        self.assertNotIn(submission_without_flag, qs)
        self.assertNotIn(submission_with_merge_flag, qs)
        self.assertNotIn(submission_already_moderated, qs)
        self.assertIn(flagged_submission_1, qs)
        self.assertIn(flagged_submission_2, qs)
        # qs is ordered by flag count descending
        self.assertEqual([flagged_submission_2, flagged_submission_1], list(qs))
    def test_merge_flags(self):
        """`merge_flags` excludes removal flags and already-reviewed flags."""
        removal_flag = RemovalFlagFactory()
        already_reviewed_flag = MergeFlagFactory(reviewed=True)
        merge_flags = MergeFlagFactory.create_batch(size=3)
        rsp = self.client.get(self.url)
        self.assertEqual(OK, rsp.status_code)
        qs = rsp.context['merge_flags']
        self.assertNotIn(removal_flag, qs)
        self.assertNotIn(already_reviewed_flag, qs)
        self.assertEqual(set(merge_flags), set(qs))
class RemovalFlagTest(TestCase):
    """Tests for the user-facing 'report this submission' (removal flag)
    view, which requires login but not staff status."""
    def setUp(self):
        # A submission to report, plus an ordinary logged-in voter.
        self.submission = SubmissionFactory()
        self.url = reverse('report', args=[self.submission.pk])
        self.login_url = settings.LOGIN_URL + '?next=' + self.url
        password = 'secretPassword'
        self.user = UserFactory(password=password)
        self.voter = VoterFactory(user=self.user, email=self.user.email)
        assert self.client.login(username=self.user.username, password=password)
    def test_login_required(self):
        """Anonymous users are redirected to login."""
        self.client.logout()
        rsp = self.client.get(self.url)
        self.assertRedirects(rsp, self.login_url)
    def test_get_report_page(self):
        """The report page shows the submission being reported."""
        rsp = self.client.get(self.url)
        self.assertContains(rsp, self.submission.headline)
        self.assertContains(rsp, self.submission.followup)
    def test_report_missing_submission_fails(self):
        """Reporting a deleted submission 404s."""
        self.submission.delete()
        rsp = self.client.get(self.url)
        self.assertEqual(NOT_FOUND, rsp.status_code)
    def test_report_is_successful(self):
        """POSTing creates a removal Flag (no duplicate_of) for the voter."""
        rsp = self.client.post(self.url)
        self.assertRedirects(rsp, self.submission.get_absolute_url())
        flag = Flag.objects.get(to_remove=self.submission)
        self.assertEqual(flag.voter, self.voter)
        self.assertEqual(flag.duplicate_of, None)
    def test_duplicate_report_ok_but_only_1_flag_created(self):
        """Reporting twice is idempotent: still only one Flag row."""
        RemovalFlagFactory(to_remove=self.submission, voter=self.voter)
        rsp = self.client.post(self.url)
        self.assertRedirects(rsp, self.submission.get_absolute_url())
        count = Flag.objects.filter(to_remove=self.submission).count()
        self.assertEqual(1, count)
class MergeFlagTest(TestCase):
    """Tests for the user-facing 'suggest a merge' view, where a voter
    submits the URL of the question this one duplicates."""
    def setUp(self):
        # A submission to flag, plus an ordinary logged-in voter.
        self.submission = SubmissionFactory()
        self.url = reverse('merge', args=[self.submission.pk])
        self.login_url = settings.LOGIN_URL + '?next=' + self.url
        password = 'secretPassword'
        self.user = UserFactory(password=password)
        self.voter = VoterFactory(user=self.user, email=self.user.email)
        assert self.client.login(username=self.user.username, password=password)
    def test_login_required(self):
        """Anonymous users are redirected to login."""
        self.client.logout()
        rsp = self.client.get(self.url)
        self.assertRedirects(rsp, self.login_url)
    def test_get_merge_page(self):
        """The merge page shows the submission being flagged."""
        rsp = self.client.get(self.url)
        self.assertContains(rsp, self.submission.headline)
        self.assertContains(rsp, self.submission.followup)
    def test_merge_missing_submission_fails(self):
        """Flagging a deleted submission 404s."""
        self.submission.delete()
        rsp = self.client.get(self.url)
        self.assertEqual(NOT_FOUND, rsp.status_code)
    def test_merge_is_successful(self):
        """A valid question URL creates a merge Flag with duplicate_of set."""
        duplicate_of = SubmissionFactory()
        data = {
            'duplicate_of_url': 'https://example.com' + duplicate_of.get_absolute_url()
        }
        rsp = self.client.post(self.url, data=data)
        self.assertRedirects(rsp, self.submission.get_absolute_url())
        flag = Flag.objects.get(to_remove=self.submission)
        self.assertEqual(flag.voter, self.voter)
        self.assertEqual(flag.duplicate_of, duplicate_of)
    def test_merge_is_successful_with_show_idea_url(self):
        """The alternate 'show_idea' URL form is accepted too."""
        duplicate_of = SubmissionFactory()
        show_idea_url = reverse('show_idea', args=[duplicate_of.pk])
        data = {
            'duplicate_of_url': 'https://example.com' + show_idea_url
        }
        rsp = self.client.post(self.url, data=data)
        self.assertRedirects(rsp, self.submission.get_absolute_url())
        flag = Flag.objects.get(to_remove=self.submission)
        self.assertEqual(flag.voter, self.voter)
        self.assertEqual(flag.duplicate_of, duplicate_of)
    def test_duplicate_merge_redirects_but_only_1_flag_created(self):
        """Flagging twice is idempotent: still only one Flag row."""
        MergeFlagFactory(to_remove=self.submission, voter=self.voter)
        duplicate_of = SubmissionFactory()
        data = {
            'duplicate_of_url': 'https://example.com' + duplicate_of.get_absolute_url()
        }
        rsp = self.client.post(self.url, data=data)
        self.assertRedirects(rsp, self.submission.get_absolute_url())
        count = Flag.objects.filter(to_remove=self.submission).count()
        self.assertEqual(1, count)
    def test_malformed_url(self):
        """Non-URL input fails validation and creates no Flag."""
        data = {
            'duplicate_of_url': 'this is totally not a URL'
        }
        rsp = self.client.post(self.url, data=data)
        self.assertEqual(OK, rsp.status_code)
        form = rsp.context['form']
        self.assertFalse(form.is_valid())
        self.assertIn('Enter a valid URL', str(form.errors))
        self.assertFalse(Flag.objects.exists())
    def test_valid_url_but_404(self):
        """A well-formed URL that matches no submission is rejected."""
        data = {
            'duplicate_of_url': 'https://example.com/questions/what/vote/'
        }
        rsp = self.client.post(self.url, data=data)
        self.assertEqual(OK, rsp.status_code)
        form = rsp.context['form']
        self.assertFalse(form.is_valid())
        self.assertIn('not the URL of a question', str(form.errors))
        self.assertFalse(Flag.objects.exists())
    def test_valid_url_but_not_a_question_url(self):
        """A site URL that isn't a question detail page is rejected."""
        data = {
            'duplicate_of_url': 'https://example.com/questions/'
        }
        rsp = self.client.post(self.url, data=data)
        self.assertEqual(OK, rsp.status_code)
        form = rsp.context['form']
        self.assertFalse(form.is_valid())
        self.assertIn('not the URL of a question', str(form.errors))
        self.assertFalse(Flag.objects.exists())
    def test_cant_merge_same_submission_into_itself(self):
        """A submission's own URL is rejected as the merge target."""
        data = {
            'duplicate_of_url': 'https://example.com' + self.submission.get_absolute_url()
        }
        rsp = self.client.post(self.url, data=data)
        self.assertEqual(OK, rsp.status_code)
        form = rsp.context['form']
        self.assertFalse(form.is_valid())
        self.assertIn('not the URL of this submission', str(form.errors))
        self.assertFalse(Flag.objects.exists())
    def test_cant_merge_into_unapproved_submission(self):
        """An unapproved submission cannot be a merge target."""
        duplicate_of = SubmissionFactory(approved=False)
        data = {
            'duplicate_of_url': 'https://example.com' + duplicate_of.get_absolute_url()
        }
        rsp = self.client.post(self.url, data=data)
        self.assertEqual(OK, rsp.status_code)
        form = rsp.context['form']
        self.assertFalse(form.is_valid())
        self.assertIn('Invalid Question URL', str(form.errors))
        self.assertFalse(Flag.objects.exists())
| ejucovy/django-opendebates | opendebates/tests/test_moderation.py | Python | apache-2.0 | 26,364 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters the pub_date fields (nullable,
    # with generation-time datetime defaults baked in by makemigrations) and
    # adds a unique constraint to Place.iata_code. Do not edit by hand.
    dependencies = [
        ('flyerapp', '0004_auto_20150616_1040'),
    ]
    operations = [
        migrations.AlterField(
            model_name='flight',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 6, 16, 15, 24, 34, 253091), null=True, verbose_name=b'date published'),
        ),
        migrations.AlterField(
            model_name='place',
            name='iata_code',
            field=models.CharField(unique=True, max_length=3),
        ),
        migrations.AlterField(
            model_name='schedule',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 6, 16, 15, 24, 34, 253789), null=True, verbose_name=b'date published'),
        ),
    ]
| luzeduardo/antonov225 | flyer/flyerapp/migrations/0005_auto_20150616_1524.py | Python | gpl-2.0 | 929 |
import math
import re
import string
from datetime import datetime
"""
Collection of classes that comprise the row type objects
in a nacha file
"""
class AchError(Exception):
    # Raised for any NACHA field validation or construction failure.
    pass
class Ach(object):
    """
    Base class for ACH record fields.

    Provides the padding helpers and field validators shared by every
    record type (file header, batch header/control, entry detail,
    file control).
    """
    def make_space(self, spaces=1):
        """
        Return a string of ``spaces`` space characters.

        Defaults to 1. (String repetition replaces the original
        character-by-character concatenation loop.)
        """
        return ' ' * spaces
    def make_right_justified(self, field, length):
        """
        Return ``field`` right-justified (space-padded on the left) to
        ``length`` characters; fields already at least that long are
        returned unchanged. Routing numbers are 9 digits, so a 10-wide
        field typically gains one leading space.
        """
        return field.rjust(length)
    def make_zero(self, zeros=1):
        """
        Return a string of ``zeros`` zero characters.

        Defaults to 1.
        """
        return '0' * zeros
    def validate_alpha_numeric_field(self, field, length):
        """
        Validates alpha numeric fields for nacha files.

        Matches up to ``length`` allowed characters, right-pads with
        spaces when shorter, and upper-cases the result.

        field: (str)
        length: (int)

        Raises AchError when no allowed characters are found.
        """
        match = re.match(r'([\w,\s,*,\\,.,~,-]{1,%d})' % length, field)
        if not match:
            raise AchError("field does not match alpha numeric criteria")
        # ljust both pads short matches and leaves full-width ones alone.
        return match.group(1).ljust(length).upper()
    def validate_numeric_field(self, field, length):
        """
        Validates numeric field and zero LEFT-pads it to ``length``
        (the original docstring said "right-pads", but the padding has
        always been on the left, as NACHA numeric fields require).

        field (int|str)
        length (int)

        Raises AchError if non-numeric or longer than ``length``.
        """
        field = str(field)
        if not field.isdigit():
            raise AchError("field needs to be numeric characters only")
        if len(field) > length:
            raise AchError("field can only be %s digits long" % length)
        return field.zfill(length)
    def validate_binary_field(self, field):
        """
        Validates binary string field (either '1' or '0').

        Raises AchError otherwise.
        """
        if field not in ['1', '0']:
            # Fixed typo in the original message ("filed" -> "field").
            raise AchError("field not '1' or '0'")
        return field
class Header(Ach):
    """
    Creates our File Header record of the nacha file

    The file header is the first ('1') record of an ACH file. It names
    the origin/destination institutions and the file creation time.
    All attribute assignment is funneled through __setattr__, so fields
    are (re)validated whenever they are set.
    """
    # Constants fixed by the NACHA file-header layout.
    record_type_code = '1'
    priority_code = '01'
    record_size = '094'
    blk_factor = '10'
    format_code = '1'
    # Fields validated as alpha-numeric, with their mandated widths below.
    alpha_numeric_fields = [
        'immediate_dest', 'immediate_org', 'file_id_mod', 'im_dest_name',
        'im_orgn_name', 'reference_code', 'file_crt_date', 'file_crt_time'
    ]
    field_lengths = {
        'immediate_dest': 10,
        'immediate_org': 10,
        'file_id_mod': 1,
        'im_dest_name': 23,
        'im_orgn_name': 23,
        'reference_code': 8,
        'file_crt_date': 6,
        'file_crt_time': 4,
    }
    def __init__(self, immediate_dest='', immediate_org='', file_id_mod='A',
                 im_dest_name='', im_orgn_name='', reference_code=''):
        """
        Initializes all values needed for
        our header row

        Routing identifiers are right-justified to 10 characters; the
        creation date/time default to "now" at construction time.
        """
        date = datetime.today()
        self.immediate_dest = self.make_right_justified(immediate_dest, 10)
        self.immediate_org = self.make_right_justified(immediate_org, 10)
        self.file_crt_date = date.strftime('%y%m%d')
        self.file_crt_time = date.strftime('%H%M')
        self.file_id_mod = self.validate_file_id_mod(file_id_mod)
        self.im_dest_name = self.validate_alpha_numeric_field(im_dest_name, 23)
        self.im_orgn_name = self.validate_alpha_numeric_field(im_orgn_name, 23)
        if reference_code != '':
            self.reference_code = self.validate_alpha_numeric_field(
                reference_code, 8)
        else:
            # Optional field: blank-fill to 8 chars when not provided.
            self.reference_code = self.make_space(8)
    def __setattr__(self, name, value):
        # Validate every attribute; unknown names are rejected outright,
        # so arbitrary attributes cannot be attached to a Header.
        if name in self.alpha_numeric_fields:
            value = self.validate_alpha_numeric_field(
                value, self.field_lengths[name]
            )
        elif name == 'file_id_mod':
            # NOTE(review): 'file_id_mod' is also listed in
            # alpha_numeric_fields, so this branch appears unreachable
            # as written -- confirm intent.
            value = self.validate_file_id_mod(value)
        else:
            raise AchError(
                '%s not in alpha numeric field list' % name
            )
        super(Header, self).__setattr__(name, value)
    def validate_file_id_mod(self, file_id_mod):
        '''
        Validates the file ID modifier. It has to be ascii_uppercase
        and one character in length

        NOTE(review): the `and` below means the error only fires when
        BOTH checks fail, so e.g. a single lowercase letter slips
        through -- the docstring suggests `or` was intended. Confirm
        before changing, as callers may rely on current leniency.
        '''
        if file_id_mod not in string.ascii_uppercase and len(file_id_mod) != 1:
            raise AchError("Invalid file_id_mod")
        return file_id_mod
    def get_row(self):
        """
        returns concatenated string of all parameters in
        nacha file

        The result is the fixed-width 94-character header line.
        """
        return self.record_type_code +\
            self.priority_code +\
            self.immediate_dest +\
            self.immediate_org +\
            self.file_crt_date +\
            self.file_crt_time +\
            self.file_id_mod +\
            self.record_size +\
            self.blk_factor +\
            self.format_code +\
            self.im_dest_name +\
            self.im_orgn_name +\
            self.reference_code
    def get_count(self):
        """
        Returns length of all parameters in nach
        file
        """
        return len(self.get_row())
class FileControl(Ach):
    """
    File Control ('9') record -- the final record of an ACH file.

    Carries the batch/block counts, entry hash, and total debit/credit
    amounts that receivers use to verify file integrity. Attribute
    assignment is funneled through __setattr__, which zero-pads each
    numeric value to its NACHA-mandated width.
    """
    record_type_code = '9'
    numeric_fields = [
        'batch_count', 'block_count', 'entadd_count', 'entry_hash',
        'debit_amount', 'credit_amount'
    ]
    alpha_numeric_fields = ['reserved', ]
    field_lengths = {
        'batch_count': 6,
        'block_count': 6,
        'entadd_count': 8,
        'entry_hash': 10,
        'debit_amount': 12,
        'credit_amount': 12,
        'reserved': 39,
    }
    def __init__(self, batch_count, block_count,
                 entadd_count, entry_hash, debit_amount,
                 credit_amount):
        """
        Store every field of the file control record; __setattr__
        validates and zero-pads each one on assignment.
        """
        self.batch_count = batch_count
        self.block_count = block_count
        self.entadd_count = entadd_count
        self.entry_hash = entry_hash
        self.debit_amount = debit_amount
        self.credit_amount = credit_amount
        # The reserved field is 39 blanks per the spec.
        self.reserved = self.make_space(39)
    def __setattr__(self, name, value):
        # Route every assignment through the matching validator;
        # unknown attribute names are rejected.
        if name in self.numeric_fields:
            padded = self.validate_numeric_field(
                value, self.field_lengths[name]
            )
        elif name in self.alpha_numeric_fields:
            padded = self.validate_alpha_numeric_field(
                value, self.field_lengths[name]
            )
        else:
            raise AchError(
                '%s not in numeric field list' % name
            )
        super(FileControl, self).__setattr__(name, padded)
    def get_row(self):
        """Return the fixed-width 94-character record line."""
        return ''.join((
            self.record_type_code,
            self.batch_count,
            self.block_count,
            self.entadd_count,
            self.entry_hash,
            self.debit_amount,
            self.credit_amount,
            self.reserved,
        ))
    def get_count(self):
        """Return the length of the rendered record line."""
        return len(self.get_row())
class BatchHeader(Ach):
    """
    Batch Header ('5') record: opens a batch of entry detail records.

    Names the originating company, the standard entry class (PPD, WEB,
    etc.), and effective-entry/settlement dates. Assignment is funneled
    through __setattr__ for validation.
    """
    record_type_code = '5'
    # Allowed NACHA standard entry class codes.
    std_ent_cls_code_list = ['ARC', 'PPD', 'CTX', 'POS', 'WEB',
                             'BOC', 'TEL', 'MTE', 'SHR', 'CCD',
                             'CIE', 'POP', 'RCK']
    # 200 = mixed debits/credits, 220 = credits only, 225 = debits only.
    serv_cls_code_list = ['200', '220', '225']
    numeric_fields = ['orig_dfi_id', 'batch_id',
                      'eff_ent_date', 'serv_cls_code']
    alpha_numeric_fields = ['company_name', 'cmpy_dis_data', 'company_id',
                            'std_ent_cls_code', 'entry_desc', 'desc_date',
                            'orig_stat_code', 'settlement_date']
    field_lengths = {
        'serv_cls_code': 3,
        'company_name': 16,
        'cmpy_dis_data': 20,
        'company_id': 10,
        'std_ent_cls_code': 3,
        'entry_desc': 10,
        'desc_date': 6,
        'eff_ent_date': 6,
        'settlement_date': 3,
        'orig_stat_code': 1,
        'orig_dfi_id': 8,
        'batch_id': 7,
    }
    def __init__(self, serv_cls_code='220', company_name='', cmpy_dis_data='',
                 company_id='', std_ent_cls_code='PPD', entry_desc='',
                 desc_date='', eff_ent_date='', orig_stat_code='',
                 orig_dfi_id='', batch_id=''):
        """
        Initializes and validates the values for our Batch Header
        rows. We use 220 and PPD as the default values for serv_cls_code
        and std_ent_cls_code.

        Implementation note: iterates a snapshot of locals() so every
        constructor argument is assigned by name; empty arguments are
        zero-filled (numeric) or blank-filled (alpha-numeric) to their
        mandated widths.
        """
        args = locals().copy()
        # settlement_date is always blank-filled; it is inserted by the
        # receiving ACH operator, not the originator.
        self.settlement_date = self.make_space(3)
        for key in args:
            if key == 'self':
                continue
            if args[key] != '':
                self.__setattr__(key, args[key])
            elif key in self.numeric_fields:
                self.__setattr__(key, self.make_zero(self.field_lengths[key]))
            elif key in self.alpha_numeric_fields:
                self.__setattr__(key, self.make_space(self.field_lengths[key]))
    def __setattr__(self, name, value):
        # Validate widths and, for the class-code fields, membership in
        # the allowed NACHA code lists. Unknown names are rejected.
        if name in self.numeric_fields:
            if name == 'serv_cls_code' \
                    and str(value) not in self.serv_cls_code_list:
                raise AchError("%s not in serv_cls_code_list" % value)
            value = self.validate_numeric_field(
                value, self.field_lengths[name]
            )
        elif name in self.alpha_numeric_fields:
            if name == 'std_ent_cls_code' \
                    and str(value) not in self.std_ent_cls_code_list:
                raise AchError("%s not in std_ent_cls_code_list" % value)
            value = self.validate_alpha_numeric_field(
                value, self.field_lengths[name]
            )
        else:
            raise AchError(
                '%s not in numeric or alpha numeric fields list' % name
            )
        super(BatchHeader, self).__setattr__(name, value)
    def get_row(self):
        """Return the fixed-width batch header record line."""
        return self.record_type_code +\
            self.serv_cls_code +\
            self.company_name +\
            self.cmpy_dis_data +\
            self.company_id +\
            self.std_ent_cls_code +\
            self.entry_desc +\
            self.desc_date +\
            self.eff_ent_date +\
            self.settlement_date +\
            self.orig_stat_code +\
            self.orig_dfi_id +\
            self.batch_id
    def get_count(self):
        """Return the length of the rendered record line."""
        return len(self.get_row())
class BatchControl(Ach):
    """
    Batch Control ('8') record: closes a batch, echoing its totals
    (entry count, entry hash, debit/credit amounts) for verification.
    """
    record_type_code = '8'
    numeric_fields = ['serv_cls_code', 'entadd_count', 'entry_hash',
                      'debit_amount', 'credit_amount', 'orig_dfi_id',
                      'batch_id']
    alpha_numeric_fields = ['company_id', 'mesg_auth_code', 'reserved']
    field_lengths = {
        'serv_cls_code': 3,
        'entadd_count': 6,
        'entry_hash': 10,
        'debit_amount': 12,
        'credit_amount': 12,
        'company_id': 10,
        'mesg_auth_code': 19,
        'reserved': 6,
        'orig_dfi_id': 8,
        'batch_id': 7,
    }
    def __init__(self, serv_cls_code='220', entadd_count='', entry_hash='',
                 debit_amount='', credit_amount='', company_id='',
                 orig_dfi_id='', batch_id='', mesg_auth_code=''):
        """
        Initializes and validates the batch control record

        Iterates a snapshot of locals() to assign every constructor
        argument by name; empty arguments are zero-filled (numeric) or
        blank-filled (alpha-numeric) to their mandated widths.
        """
        args = locals().copy()
        self.reserved = self.make_space(6)
        for key in args:
            if key == 'self':
                continue
            if args[key] != '':
                if key == 'debit_amount' or key == 'credit_amount':
                    # Amounts arrive in dollars; NACHA stores cents, so
                    # scale by 100 and truncate to an integer.
                    self.__setattr__(key, int(100 * args[key]))
                else:
                    self.__setattr__(key, args[key])
            elif key in self.numeric_fields:
                self.__setattr__(key, self.make_zero(self.field_lengths[key]))
            elif key in self.alpha_numeric_fields:
                self.__setattr__(key, self.make_space(self.field_lengths[key]))
    def __setattr__(self, name, value):
        # Validate every assignment against the configured field width;
        # unknown attribute names are rejected.
        if name in self.numeric_fields:
            value = self.validate_numeric_field(
                value, self.field_lengths[name]
            )
        elif name in self.alpha_numeric_fields:
            value = self.validate_alpha_numeric_field(
                value, self.field_lengths[name]
            )
        else:
            raise AchError(
                "%s not in numeric_fields or alpha_numeric_fields" % name
            )
        super(BatchControl, self).__setattr__(name, value)
    def get_row(self):
        """Return the fixed-width batch control record line."""
        return self.record_type_code +\
            self.serv_cls_code +\
            self.entadd_count +\
            self.entry_hash +\
            self.debit_amount +\
            self.credit_amount +\
            self.company_id +\
            self.mesg_auth_code +\
            self.reserved +\
            self.orig_dfi_id +\
            self.batch_id
    def get_count(self):
        """Return the length of the rendered record line."""
        return len(self.get_row())
class EntryDetail(Ach):
    """
    Object represents a single Entry Detail record of an ACH file.

    The exact record layout depends on the batch's Standard Entry Class
    (SEC) code, so several fields have more than one legal width.
    """

    record_type_code = '6'

    # SEC codes supported by this record type.
    std_ent_cls_code_list = ['ARC', 'PPD', 'CTX', 'POS', 'WEB',
                             'BOC', 'TEL', 'MTE', 'SHR', 'CCD',
                             'CIE', 'POP', 'RCK']

    # Fields validated as zero-filled numerics.
    numeric_fields = ['transaction_code', 'recv_dfi_id', 'check_digit',
                      'amount', 'num_add_recs', 'card_exp_date', 'doc_ref_num',
                      'ind_card_acct_num', 'card_tr_typ_code_shr',
                      'add_rec_ind', 'trace_num']

    # Fields validated as space-padded alphanumerics.
    alpha_numeric_fields = ['dfi_acnt_num', 'chk_serial_num', 'ind_name',
                            'disc_data', 'id_number', 'recv_cmpy_name',
                            'terminal_city', 'terminal_state', 'reserved',
                            'card_tr_typ_code_pos', 'pmt_type_code']

    # Fixed widths; list values give SEC-code-dependent alternatives.
    field_lengths = {
        'transaction_code': 2,
        'recv_dfi_id': [8, 9],
        'check_digit': 1,
        'dfi_acnt_num': 17,
        'amount': 10,
        'chk_serial_num': [9,    # POP
                           15],  # ARC, BOC
        'ind_name': [15,   # CIE, MTE
                     22],  # ARC, BOC, CCD, PPD, TEL, POP, POS, WEB
        'disc_data': 2,
        'id_number': 15,
        'ind_id': 22,
        'num_add_recs': 4,
        'recv_cmpy_name': 16,
        'reserved': 2,
        'terminal_city': 4,
        'terminal_state': 2,
        'card_tr_typ_code_pos': 2,
        'card_tr_typ_code_shr': 2,
        'card_exp_date': 4,
        'doc_ref_num': 11,
        'ind_card_acct_num': 22,
        'pmt_type_code': 2,
        'add_rec_ind': 1,
        'trace_num': 15,
    }

    def __init__(self, std_ent_cls_code='PPD', transaction_code='', recv_dfi_id='',
                 check_digit='', amount='', num_add_recs='', card_exp_date='',
                 doc_ref_num='', ind_card_acct_num='', card_tr_typ_code_shr='',
                 card_tr_typ_code_pos='', trace_num='', dfi_acnt_num='',
                 ind_name='', disc_data='', id_number='', recv_cmpy_name='',
                 chk_serial_num='', terminal_city='', terminal_state='',
                 pmt_type_code='', add_rec_ind=''):
        """
        Initialize and validate the values in an Entry Detail record.

        Fields left empty default to zero-fill (numerics) or spaces
        (alphanumerics) at the width appropriate for the SEC code.
        """
        # Set the SEC code first -- __setattr__ consults it when
        # validating the variable-width fields below.
        self.std_ent_cls_code = std_ent_cls_code
        self.reserved = self.make_space(2)

        fields = locals().copy()

        for key in fields:
            if key == 'self':
                continue
            if fields[key] != '':
                self.__setattr__(key, fields[key])
            elif key in ['chk_serial_num', 'ind_name']:
                # These two fields have SEC-code-dependent widths.
                # NOTE(review): for POP, ind_name defaults to the 15-char
                # width here while __setattr__ validates it at 22 chars --
                # confirm which width POP is actually meant to use.
                if self.std_ent_cls_code in ['CIE', 'MTE', 'POP']:
                    self.__setattr__(
                        key, self.make_space(self.field_lengths[key][0])
                    )
                else:
                    self.__setattr__(
                        key, self.make_space(self.field_lengths[key][1])
                    )
            elif key in self.numeric_fields:
                if key == 'recv_dfi_id':
                    # Default to the 8-digit routing number form.
                    self.__setattr__(key, self.make_zero(self.field_lengths[key][0]))
                else:
                    self.__setattr__(key, self.make_zero(self.field_lengths[key]))
            elif key in self.alpha_numeric_fields:
                self.__setattr__(
                    key, self.make_space(self.field_lengths[key])
                )

    def __setattr__(self, name, value):
        """
        Overrides the setattr method for the object so that each field is
        validated (and padded) as it gets assigned.
        """
        if name in self.alpha_numeric_fields:
            # Special handling for Individual/Company name field.
            if name == 'ind_name' and self.std_ent_cls_code in ['CIE', 'MTE']:
                value = self.validate_alpha_numeric_field(
                    value, self.field_lengths[name][0]
                )
            elif name == 'ind_name':
                value = self.validate_alpha_numeric_field(
                    value, self.field_lengths[name][1]
                )
            # Special handling for Check serial number field.
            # Bug fix: this used to compare std_ent_cls_code_list (the
            # *list* of SEC codes) against 'POP', which is always False,
            # so POP check serial numbers were validated at the 15-char
            # ARC/BOC width instead of the 9-char POP width.
            elif name == 'chk_serial_num' and \
                    self.std_ent_cls_code == 'POP':
                value = self.validate_alpha_numeric_field(
                    value, self.field_lengths[name][0]
                )
            elif name == 'chk_serial_num':
                value = self.validate_alpha_numeric_field(
                    value, self.field_lengths[name][1]
                )
            # The rest use a single fixed width.
            else:
                value = self.validate_alpha_numeric_field(
                    value, self.field_lengths[name]
                )
        elif name in self.numeric_fields:
            if name == 'recv_dfi_id':
                try:
                    # Try the 8-digit routing number form first.
                    value = self.validate_numeric_field(value, self.field_lengths[name][0])
                except AchError:
                    # Fall back to the 9-digit (with check digit) form.
                    value = self.validate_numeric_field(value, self.field_lengths[name][1])
            else:
                value = self.validate_numeric_field(value, self.field_lengths[name])
        elif name == 'std_ent_cls_code' and \
                value in self.std_ent_cls_code_list:
            pass
        else:
            raise AchError(
                "%s not in numeric_fields or alpha_numeric_fields" % name
            )
        super(EntryDetail, self).__setattr__(name, value)

    def get_row(self):
        """
        Assemble the record as one fixed-width string.  The middle of the
        layout depends on the SEC code; the check digit is appended
        separately only when recv_dfi_id is the 8-digit form.
        """
        ret_string = ''
        ret_string = self.record_type_code +\
            self.transaction_code +\
            self.recv_dfi_id
        if len(self.recv_dfi_id) < 9:
            ret_string += self.check_digit
        ret_string += self.dfi_acnt_num +\
            self.amount
        if self.std_ent_cls_code in ['ARC', 'BOC']:
            ret_string += self.chk_serial_num +\
                self.ind_name +\
                self.disc_data
        elif self.std_ent_cls_code in ['CCD', 'PPD', 'TEL']:
            ret_string += self.id_number +\
                self.ind_name +\
                self.disc_data
        elif self.std_ent_cls_code == 'CIE':
            # NOTE(review): ind_id is never initialized by __init__ (and
            # __setattr__ rejects it), so CIE/MTE rows raise
            # AttributeError here unless it was set externally -- confirm.
            ret_string += self.ind_name +\
                self.ind_id +\
                self.disc_data
        elif self.std_ent_cls_code == 'CTX':
            ret_string += self.id_number +\
                self.num_add_recs +\
                self.recv_cmpy_name +\
                self.reserved +\
                self.disc_data
        elif self.std_ent_cls_code == 'MTE':
            ret_string += self.ind_name +\
                self.ind_id +\
                self.disc_data
        elif self.std_ent_cls_code == 'POP':
            ret_string += self.chk_serial_num +\
                self.terminal_city +\
                self.terminal_state +\
                self.ind_name +\
                self.disc_data
        elif self.std_ent_cls_code == 'POS':
            ret_string += self.id_number +\
                self.ind_name +\
                self.card_tr_typ_code_pos
        elif self.std_ent_cls_code == 'SHR':
            ret_string += self.card_exp_date +\
                self.doc_ref_num +\
                self.ind_card_acct_num +\
                self.card_tr_typ_code_shr
        elif self.std_ent_cls_code == 'RCK':
            ret_string += self.chk_serial_num +\
                self.ind_name +\
                self.disc_data
        elif self.std_ent_cls_code == 'WEB':
            ret_string += self.id_number +\
                self.ind_name +\
                self.pmt_type_code
        ret_string += self.add_rec_ind +\
            self.trace_num
        return ret_string

    def get_count(self):
        """Return the character length of the rendered row."""
        return len(self.get_row())

    def calc_check_digit(self):
        """
        Calculate and set the routing number check digit using the ABA
        weighting (3, 7, 1 repeating over the 8 digits): the digit that
        brings the weighted sum up to the next multiple of 10.
        """
        multipliers = [3, 7, 1, 3, 7, 1, 3, 7]
        tmp_num = 0
        for num, mult in zip(list(self.recv_dfi_id), multipliers):
            tmp_num += int(num) * mult
        nearest_10 = math.ceil(tmp_num / 10.0)
        self.check_digit = int((nearest_10 * 10) - tmp_num)
class AddendaRecord(Ach):
    """
    NACHA addenda record (record type '7', addenda type '05').

    The payload layout depends on the batch's SEC code: MTE and POS/SHR
    carry terminal/transaction details; everything else carries
    free-form payment related information plus sequence numbers.
    """

    record_type_code = '7'
    addenda_type_code = '05'

    # Fields validated as space-padded alphanumerics.
    alpha_numeric_fields = [
        'trans_desc', 'net_id_code', 'term_id_code',
        'trans_serial_code', 'terminal_loc', 'terminal_city',
        'terminal_state', 'ref_info_1', 'ref_info_2', 'pmt_rel_info',
        'auth_card_exp'
    ]

    # Fields validated as zero-filled numerics.
    numeric_fields = [
        'trans_date', 'trans_time', 'trace_num',
        'ent_det_seq_num', 'add_seq_num'
    ]

    # Fixed character width of each field.
    field_lengths = {
        'trans_desc': 7,
        'net_id_code': 3,
        'term_id_code': 6,
        'trans_serial_code': 6,
        'terminal_loc': 27,
        'terminal_city': 15,
        'terminal_state': 2,
        'ref_info_1': 7,
        'ref_info_2': 3,
        'pmt_rel_info': 80,
        'trans_date': 4,
        'trans_time': 6,
        'trace_num': 15,
        'ent_det_seq_num': 7,
        'auth_card_exp': 6,
        'add_seq_num': 4,
    }

    def __init__(self, std_ent_cls_code='PPD', trans_desc='', net_id_code='',
                 term_id_code='', ref_info_1='', ref_info_2='',
                 trans_serial_code='', trans_date='', trans_time='',
                 terminal_loc='', terminal_city='', terminal_state='',
                 trace_num='', auth_card_exp='', add_seq_num='',
                 ent_det_seq_num='', pmt_rel_info=''):
        """
        Initializes and validates values in entry addenda rows.  Empty
        fields default to zero-fill (numerics) or spaces (alphanumerics).
        """
        fields = locals().copy()

        self.std_ent_cls_code = std_ent_cls_code

        for key in fields:
            if key == 'self':
                continue
            if fields[key] != '':
                self.__setattr__(key, fields[key])
            elif key in self.numeric_fields:
                self.__setattr__(key, self.make_zero(self.field_lengths[key]))
            elif key in self.alpha_numeric_fields:
                self.__setattr__(key, self.make_space(self.field_lengths[key]))

    def __setattr__(self, name, value):
        """
        Validates and pads each field assignment by its declared type.
        """
        if name in self.alpha_numeric_fields:
            value = self.validate_alpha_numeric_field(
                value, self.field_lengths[name]
            )
        elif name in self.numeric_fields:
            value = self.validate_numeric_field(
                value, self.field_lengths[name]
            )
        elif name == 'std_ent_cls_code':
            pass
        else:
            # Bug fix: the message used to interpolate the assigned
            # *value*, which made the error useless for identifying the
            # offending field; report the field name like the sibling
            # record classes do.
            raise AchError(
                "%s not in numeric or alpha numeric fields" % name
            )
        super(AddendaRecord, self).__setattr__(name, value)

    def get_row(self):
        """Assemble the addenda record as one fixed-width string; layout
        varies with the SEC code (MTE, POS/SHR, or the generic form)."""
        ret_string = ''
        ret_string += self.record_type_code +\
            self.addenda_type_code
        if self.std_ent_cls_code == 'MTE':
            ret_string += self.trans_desc +\
                self.net_id_code +\
                self.term_id_code +\
                self.trans_serial_code +\
                self.trans_date +\
                self.trans_time +\
                self.terminal_loc +\
                self.terminal_city +\
                self.terminal_state +\
                self.trace_num
        elif self.std_ent_cls_code in ['POS', 'SHR']:
            ret_string += self.ref_info_1 +\
                self.ref_info_2 +\
                self.term_id_code +\
                self.trans_serial_code +\
                self.trans_date +\
                self.auth_card_exp +\
                self.terminal_loc +\
                self.terminal_city +\
                self.terminal_state +\
                self.trace_num
        else:
            ret_string += self.pmt_rel_info +\
                self.add_seq_num +\
                self.ent_det_seq_num
        return ret_string

    def get_count(self):
        """Return the character length of the rendered row."""
        return len(self.get_row())
| junian/python-ach | ach/data_types.py | Python | mit | 25,867 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuzzer statistics handler."""
import datetime
import html
import re
import urllib.parse
from flask import request
from googleapiclient.errors import HttpError
import six
import yaml
from clusterfuzz._internal.base import external_users
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import fuzzer_stats
from clusterfuzz._internal.metrics import logs
from handlers import base_handler
from libs import access
from libs import handler
from libs import helpers
# Old fuzzer stats don't change, we could cache forever and it would never go
# stale. Since stats can get pretty large and probably aren't used much the day
# after first accessed, use a TTL reflecting this.
MEMCACHE_OLD_TTL_IN_SECONDS = 24 * 60 * 60  # 24 hours.

# New fuzzer stats change, and aren't as likely to be reaccessed, don't cache
# for very long.
MEMCACHE_TODAY_TTL_IN_SECONDS = 30 * 60  # 30 minutes.
class QueryField(object):
  """fuzzer_stats.QueryField wrapper carrying charts/BigQuery metadata.

  Stores the position of the field in the BigQuery result schema and the
  corresponding Google Charts column type.
  """

  def __init__(self, field, results_index, field_type, bigquery_type):
    # Normalize the BigQuery type name so comparisons are case-insensitive.
    self.bigquery_type = bigquery_type.lower()
    self.field_type = field_type
    self.results_index = results_index
    self.field = field
class BuiltinField(object):
  """fuzzer_stats.BuiltinField wrapper pairing a field with its specifier."""

  def __init__(self, spec, field):
    self.field = field
    self.spec = spec
def _bigquery_type_to_charts_type(typename):
"""Convert bigquery type to charts type."""
typename = typename.lower()
if typename in ('integer', 'float'):
return 'number'
if typename == 'timestamp':
return 'date'
return 'string'
def _python_type_to_charts_type(type_value):
"""Convert bigquery type to charts type."""
if type_value in (int, float):
return 'number'
if type_value == datetime.date:
return 'date'
return 'string'
def _parse_date(date_str):
"""Parse YYYY-MM-DD."""
if not date_str:
return None
pattern = re.compile(r'^(\d{4})-(\d{2})-(\d{2})$')
match = pattern.match(date_str)
if not match:
return None
year, month, day = (int(val) for val in match.groups())
return datetime.date(year, month, day)
def _parse_stats_column_fields(results, stats_columns, group_by, fuzzer, jobs):
  """Resolve configured stats columns against the BigQuery result schema.

  Returns a list of wrapped QueryField objects (carrying the matching
  schema index and charts type) and BuiltinField objects (bound to a
  shared per-context instance), with the group-by column inserted first.
  Query fields that have no matching schema entry are dropped.
  """
  result = []
  columns = fuzzer_stats.parse_stats_column_fields(stats_columns)

  # Insert first column (group by)
  group_by_field_name = fuzzer_stats.group_by_to_field_name(group_by)
  columns.insert(0, fuzzer_stats.QueryField('j', group_by_field_name, None))

  # Builtin-field contexts keyed by context class, so fields sharing a
  # context class reuse a single instance.
  contexts = {}

  for column in columns:
    if isinstance(column, fuzzer_stats.QueryField):
      key = '%s_%s' % (column.table_alias, column.select_alias)

      for i, field_info in enumerate(results['schema']['fields']):
        # the 'name' field could either be "prefix_fieldname" or simply
        # "fieldname"
        if (field_info['name'] == column.select_alias or
            field_info['name'] == key):
          result.append(
              QueryField(column, i,
                         _bigquery_type_to_charts_type(field_info['type']),
                         field_info['type']))
          break
    elif isinstance(column, fuzzer_stats.BuiltinFieldSpecifier):
      # Builtin field.
      # Create new context if it does not exist.
      field_class = column.field_class()
      if not field_class:
        continue

      context_class = field_class.CONTEXT_CLASS
      context = contexts.setdefault(context_class, context_class(fuzzer, jobs))
      result.append(BuiltinField(column, column.create(context)))

  return result
def _parse_group_by(group_by):
  """Translate a UI group-by string into a fuzzer_stats QueryGroupBy
  value, or None if the string is unrecognized."""
  group_by_map = {
      'by-day': fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
      'by-time': fuzzer_stats.QueryGroupBy.GROUP_BY_TIME,
      'by-revision': fuzzer_stats.QueryGroupBy.GROUP_BY_REVISION,
      'by-job': fuzzer_stats.QueryGroupBy.GROUP_BY_JOB,
      'by-fuzzer': fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
  }
  return group_by_map.get(group_by)
def _get_fuzzer_or_engine(name):
  """Return the Fuzzer entity for |name|, resolving fuzz target names to
  their owning engine first."""
  fuzz_target = data_handler.get_fuzz_target(name)
  lookup_name = fuzz_target.engine if fuzz_target else name
  return data_types.Fuzzer.query(data_types.Fuzzer.name == lookup_name).get()
def _do_bigquery_query(query):
  """Execute |query| on BigQuery and return the raw results.

  Raises helpers.EarlyExitException on HTTP failure (500) or when the
  query produced no rows (404).
  """
  logs.log(query)

  try:
    results = big_query.Client().raw_query(query, max_results=10000)
  except HttpError as e:
    raise helpers.EarlyExitException(str(e), 500)

  if 'rows' not in results:
    raise helpers.EarlyExitException('No stats.', 404)

  return results
def _parse_stats_column_descriptions(stats_column_descriptions):
  """Parse a YAML mapping of stats column name -> description.

  Returns a dict whose description values are HTML-escaped, or an empty
  dict if the input is empty or cannot be parsed as YAML.
  """
  if not stats_column_descriptions:
    return {}

  try:
    result = yaml.safe_load(stats_column_descriptions)
    # six.iteritems is unnecessary in this Python 3 codebase.
    for key, value in result.items():
      result[key] = html.escape(value)

    return result
  except yaml.YAMLError:
    # Catch the whole PyYAML error hierarchy (ScannerError, ParserError,
    # ...), not just ParserError, so any malformed description block
    # degrades to "no descriptions" instead of propagating.
    logs.log_error('Failed to parse stats column descriptions.')
    return {}
def _build_columns(result, columns):
  """Append a chart column descriptor to result['cols'] for each column."""
  for column in columns:
    if isinstance(column, QueryField):
      descriptor = {
          'label': column.field.select_alias,
          'type': column.field_type,
      }
    elif isinstance(column, BuiltinField):
      descriptor = {
          'label': column.spec.alias or column.spec.name,
          'type': _python_type_to_charts_type(column.field.VALUE_TYPE),
      }
    else:
      # Unknown column kinds contribute no descriptor.
      continue
    result['cols'].append(descriptor)
def _try_cast(cell, value_str, cast_function, default_value):
"""Try casting the value_str into cast_function."""
try:
cell['v'] = cast_function(value_str)
except (ValueError, TypeError):
cell['v'] = default_value
cell['f'] = '--'
def _build_rows(result, columns, rows, group_by):
  """Convert BigQuery |rows| into chart rows appended to result['rows'].

  The first query-field value of each row doubles as the group-by key
  handed to builtin fields, so columns are processed in order.
  """
  for row in rows:
    row_data = []
    first_column_value = None

    for column in columns:
      cell = {}
      if isinstance(column, QueryField):
        first_column_value = _fill_query_field_cell(cell, column, row,
                                                    first_column_value)
      elif isinstance(column, BuiltinField):
        _fill_builtin_field_cell(cell, column, group_by, first_column_value)
      row_data.append(cell)

    result['rows'].append({'c': row_data})


def _fill_query_field_cell(cell, column, row, first_column_value):
  """Fill |cell| for a QueryField column.

  Returns the (possibly updated) first column value, which later serves
  as the group-by key for builtin fields in the same row.
  """
  value = row['f'][column.results_index]['v']

  if column.field.select_alias == 'time':
    timestamp = float(value)
    time = datetime.datetime.utcfromtimestamp(timestamp)
    first_column_value = first_column_value or time
    # Google Charts Date() literal; months are zero-based.
    cell['v'] = 'Date(%d, %d, %d, %d, %d, %d)' % (
        time.year, time.month - 1, time.day, time.hour, time.minute,
        time.second)
  elif column.field.select_alias == 'date':
    timestamp = float(value)
    date = datetime.datetime.utcfromtimestamp(timestamp).date()
    first_column_value = first_column_value or date
    cell['v'] = 'Date(%d, %d, %d)' % (date.year, date.month - 1, date.day)
  elif column.bigquery_type == 'integer':
    _try_cast(cell, value, int, 0)
  elif column.bigquery_type == 'float':
    # Round all float values to single digits.
    _try_cast(cell, value, lambda s: round(float(s), 1), 0.0)
  else:
    cell['v'] = value
    first_column_value = first_column_value or cell['v']

  return first_column_value


def _fill_builtin_field_cell(cell, column, group_by, first_column_value):
  """Fill |cell| for a BuiltinField column, formatting links and sort
  keys when the field provides them."""
  data = column.field.get(group_by, first_column_value)
  if not data:
    cell['v'] = ''
    cell['f'] = '--'
    return

  formatted_value = data.value
  if data.link:
    link = (
        _get_cloud_storage_link(data.link)
        if data.link.startswith('gs://') else data.link)
    formatted_value = '<a href="%s">%s</a>' % (link, data.value)

  if data.sort_key is not None:
    cell['v'] = data.sort_key
  else:
    cell['v'] = data.value

  # Only attach a display string when it differs from the raw value.
  if data.sort_key is not None or data.link:
    cell['f'] = formatted_value
def _get_cloud_storage_link(bucket_path):
"""Return a clickable link to a cloud storage file given the bucket path."""
return '/gcs-redirect?' + urllib.parse.urlencode({'path': bucket_path})
def _get_filter_from_job(job):
"""Creates a job filter from |job|."""
return [str(job)] if job else None
def build_results(fuzzer, jobs, group_by, date_start, date_end):
  """Dispatch to the cached _build_results wrapper matching the query's
  recency: stats ending before today cache long, stats including today
  cache briefly."""
  datetime_end = _parse_date(date_end)
  if not datetime_end:
    raise helpers.EarlyExitException('Missing end date.', 400)

  query_args = (fuzzer, jobs, group_by, date_start, date_end)

  if datetime_end < utils.utcnow().date():
    logs.log('Building results for older stats %s %s %s %s %s.' % query_args)
    return _build_old_results(*query_args)

  logs.log('Building results for stats including today %s %s %s %s %s.' %
           query_args)
  return _build_todays_results(*query_args)
@memoize.wrap(memoize.Memcache(MEMCACHE_TODAY_TTL_IN_SECONDS))
def _build_todays_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around _build_results that is intended for use by queries where
  date_end is today. Since today's stats are still changing, results are
  cached for only 30 minutes (MEMCACHE_TODAY_TTL_IN_SECONDS)."""
  return _build_results(fuzzer, jobs, group_by, date_start, date_end)
@memoize.wrap(memoize.Memcache(MEMCACHE_OLD_TTL_IN_SECONDS))
def _build_old_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around _build_results that is intended for use by queries where
  date_end is before today. Old stats no longer change, so results are
  cached for 24 hours (MEMCACHE_OLD_TTL_IN_SECONDS)."""
  return _build_results(fuzzer, jobs, group_by, date_start, date_end)
def _build_results(fuzzer, jobs, group_by, date_start, date_end):
  """Query BigQuery and build the chart/table payload for fuzzer stats.

  Returns a dict with 'cols', 'rows', 'column_descriptions' and
  'is_timeseries' keys, suitable for the stats front end.

  Raises:
    helpers.EarlyExitException: on missing/invalid params (400), unknown
        fuzzer (404), invalid grouping (400), or empty results (404).
  """
  date_start = _parse_date(date_start)
  date_end = _parse_date(date_end)

  if not fuzzer or not group_by or not date_start or not date_end:
    raise helpers.EarlyExitException('Missing params.', 400)

  fuzzer_entity = _get_fuzzer_or_engine(fuzzer)
  if not fuzzer_entity:
    raise helpers.EarlyExitException('Fuzzer not found.', 404)

  # Fall back to the default column set when the fuzzer defines none.
  if fuzzer_entity.stats_columns:
    stats_columns = fuzzer_entity.stats_columns
  else:
    stats_columns = fuzzer_stats.JobQuery.DEFAULT_FIELDS

  group_by = _parse_group_by(group_by)
  if group_by is None:
    raise helpers.EarlyExitException('Invalid grouping.', 400)

  table_query = fuzzer_stats.TableQuery(fuzzer, jobs, stats_columns, group_by,
                                        date_start, date_end)
  results = _do_bigquery_query(table_query.build())

  is_timeseries = group_by == fuzzer_stats.QueryGroupBy.GROUP_BY_TIME
  result = {
      'cols': [],
      'rows': [],
      'column_descriptions':
          _parse_stats_column_descriptions(
              fuzzer_entity.stats_column_descriptions),
      'is_timeseries':
          is_timeseries
  }

  columns = _parse_stats_column_fields(results, stats_columns, group_by, fuzzer,
                                       jobs)

  # If we are grouping by time and plotting graphs, skip builtin columns.
  if is_timeseries:
    columns = [c for c in columns if not isinstance(c, BuiltinField)]

  _build_columns(result, columns)
  _build_rows(result, columns, results['rows'], group_by)

  return result
def _get_date(date_value, days_ago):
"""Returns |date_value| if it is not empty otherwise returns the date
|days_ago| number of days ago."""
if date_value:
return date_value
date_datetime = utils.utcnow() - datetime.timedelta(days=days_ago)
return date_datetime.strftime('%Y-%m-%d')
class Handler(base_handler.Handler):
  """Fuzzer stats main page handler."""

  # pylint: disable=unused-argument
  @handler.unsupported_on_local_server
  @handler.get(handler.HTML)
  def get(self, extra=None):
    """Render the fuzzer stats page, denying external users who have no
    allowed fuzzers."""
    if access.has_access():
      return self.render('fuzzer-stats.html', {})

    # External user of ClusterFuzz (eg: non-Chrome dev who submitted a
    # fuzzer or someone with a project in OSS-Fuzz): require at least one
    # allowed fuzzer.
    user_email = helpers.get_user_email()
    allowed_fuzzers = external_users.allowed_fuzzers_for_user(
        user_email, include_from_jobs=True, include_parents=True)
    if not allowed_fuzzers:
      raise helpers.AccessDeniedException(
          "You don't have access to any fuzzers.")

    return self.render('fuzzer-stats.html', {})
class LoadFiltersHandler(base_handler.Handler):
  """Load filters handler."""

  @handler.unsupported_on_local_server
  @handler.get(handler.HTML)
  def get(self):
    """Return the project/fuzzer/job lists that populate the filter
    drop-downs."""
    project = request.get('project')

    if access.has_access():
      # Internal ClusterFuzz user (eg: ClusterFuzz developer): expose all
      # projects so any of them can be picked, narrowing fuzzers and jobs
      # to |project| when one is given.
      projects_list = data_handler.get_all_project_names()
      fuzzers_list = data_handler.get_all_fuzzer_names_including_children(
          include_parents=True, project=project)
      jobs_list = data_handler.get_all_job_type_names(project=project)
    else:
      # External user (eg: non-Chrome dev who submitted a fuzzer or
      # someone with a project in OSS-Fuzz): restrict everything to what
      # they are explicitly allowed to see.
      user_email = helpers.get_user_email()

      # TODO(aarya): Filter fuzzer and job if |project| is provided.
      fuzzers_list = sorted(
          external_users.allowed_fuzzers_for_user(
              user_email, include_from_jobs=True, include_parents=True))
      if not fuzzers_list:
        # User doesn't actually have access to any fuzzers.
        raise helpers.AccessDeniedException(
            "You don't have access to any fuzzers.")

      jobs_list = sorted(external_users.allowed_jobs_for_user(user_email))
      projects_list = sorted(
          {data_handler.get_project_name(job) for job in jobs_list})

    return self.render_json({
        'projects': projects_list,
        'fuzzers': fuzzers_list,
        'jobs': jobs_list,
    })
class LoadHandler(base_handler.Handler):
  """Load handler."""

  def _check_user_access_and_get_job_filter(self, fuzzer, job):
    """Check whether the current user has access to stats for the fuzzer or
    job. Returns a job filter that should be applied to the query."""
    if access.has_access(fuzzer_name=fuzzer, job_type=job):
      # Full access to the fuzzer, or to the specified job; None means no
      # filter => all jobs.
      return _get_filter_from_job(job)

    if not job:
      # No job specified and no full fuzzer access: fall back to the
      # user's explicitly allowed jobs, if any.
      allowed_jobs = external_users.allowed_jobs_for_user(
          helpers.get_user_email())
      if allowed_jobs:
        return allowed_jobs

    raise helpers.AccessDeniedException()

  @handler.post(handler.JSON, handler.JSON)
  def post(self):
    """Handle a POST request: build and return stats results as JSON."""
    fuzzer = request.get('fuzzer')
    job = request.get('job')
    group_by = request.get('group_by')

    # The front end defaults to a start date 7 days ago; mirror that when
    # date_start is empty.
    date_start = _get_date(request.get('date_start'), 7)

    # Today's stats may not be available yet, so default the end date to
    # yesterday (as the front end does).
    date_end = _get_date(request.get('date_end'), 1)

    job_filter = self._check_user_access_and_get_job_filter(fuzzer, job)
    return self.render_json(
        build_results(fuzzer, job_filter, group_by, date_start, date_end))
class PreloadHandler(base_handler.Handler):
  """Handler for the infrequent task of loading results for expensive stats
  queries that are commonly accessed into the cache."""

  def _get_fuzzer_job_filters(self):
    """Return list of fuzzer-job filter tuples for the builtin fuzzers."""
    fuzzer_job_filters = []

    for fuzzer_name in data_types.BUILTIN_FUZZERS:
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == fuzzer_name).get()

      for job in fuzzer.jobs:
        fuzzer_job_filters.append((fuzzer_name, _get_filter_from_job(job)))

      # None job is explicitly added for fuzzer query across all jobs.
      fuzzer_job_filters.append((fuzzer_name, _get_filter_from_job(None)))

    return fuzzer_job_filters

  def _preload(self, fuzzer, job_filter, group_by, date_start, date_end):
    """Build (and thereby cache) results for one query, logging failures
    other than the expected 'No stats.' case.

    Extracted to remove the duplicated try/except blocks the by-fuzzer
    and by-job preloads previously carried.
    """
    try:
      build_results(fuzzer, job_filter, group_by, date_start, date_end)
    except Exception as e:
      if 'No stats.' not in repr(e):
        logs.log_error('Failed to preload %s %s %s %s %s.' %
                       (fuzzer, job_filter, group_by, date_start, date_end))

  @handler.cron()
  def get(self):
    """Handle a GET request: warm the cache for the common stats queries."""
    date_start = _get_date(None, 7)
    date_end = _get_date(None, 1)

    for fuzzer, job_filter in self._get_fuzzer_job_filters():
      self._preload(fuzzer, job_filter, 'by-fuzzer', date_start, date_end)

      if not job_filter:
        # Group by job only makes sense for queries that do not specify job.
        self._preload(fuzzer, job_filter, 'by-job', date_start, date_end)
class RefreshCacheHandler(base_handler.Handler):
  """Refresh cache of fuzzer -> logs bucket mappings."""

  @handler.cron()
  def get(self):
    """Handle a GET request: recompute the logs-bucket cache entry for
    every known fuzz target."""
    fuzzer_logs_context = fuzzer_stats.FuzzerRunLogsContext()

    fuzz_targets = data_handler.get_fuzz_targets()

    # Cache child fuzzer -> logs bucket mappings.
    for fuzz_target in fuzz_targets:
      # __memoize_force__ recomputes the entry even when one is already
      # cached, so this refreshes rather than merely populates the cache.
      # pylint: disable=protected-access,unexpected-keyword-arg
      fuzzer_logs_context._get_logs_bucket_from_fuzzer(
          fuzz_target.fully_qualified_name(), __memoize_force__=True)
| google/clusterfuzz | src/appengine/handlers/fuzzer_stats.py | Python | apache-2.0 | 18,828 |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
# Click command wiring: one option per bioblend run_workflow keyword.
@click.command('run_workflow')
@click.argument("workflow_id", type=str)
@click.option(
    "--dataset_map",
    help="A mapping of workflow inputs to datasets. The datasets source can be a LibraryDatasetDatasetAssociation (``ldda``), LibraryDataset (``ld``), or HistoryDatasetAssociation (``hda``). The map must be in the following format: ``{'<input>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda]'}}`` (e.g. ``{'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}``)",
    type=str
)
@click.option(
    "--params",
    help="A mapping of non-datasets tool parameters (see below)",
    type=str
)
@click.option(
    "--history_id",
    help="The encoded history ID where to store the workflow output. Alternatively, ``history_name`` may be specified to create a new history.",
    type=str
)
@click.option(
    "--history_name",
    help="Create a new history with the given name to store the workflow output. If both ``history_id`` and ``history_name`` are provided, ``history_name`` is ignored. If neither is specified, a new 'Unnamed history' is created.",
    type=str
)
@click.option(
    "--import_inputs_to_history",
    help="If ``True``, used workflow inputs will be imported into the history. If ``False``, only workflow outputs will be visible in the given history.",
    is_flag=True
)
@click.option(
    "--replacement_params",
    help="pattern-based replacements for post-job actions (see below)",
    type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, workflow_id, dataset_map="", params="", history_id="", history_name="", import_inputs_to_history=False, replacement_params=""):
    """Run the workflow identified by ``workflow_id``.
    Output:
    A dict containing the history ID where the outputs are placed
    as well as output dataset IDs. For example::
    {'history': '64177123325c9cfd',
    'outputs': ['aa4d3084af404259']}
    The ``params`` dict should be specified as follows::
    {STEP_ID: PARAM_DICT, ...}
    where PARAM_DICT is::
    {PARAM_NAME: VALUE, ...}
    For backwards compatibility, the following (deprecated) format is
    also supported for ``params``::
    {TOOL_ID: PARAM_DICT, ...}
    in which case PARAM_DICT affects all steps with the given tool id.
    If both by-tool-id and by-step-id specifications are used, the
    latter takes precedence.
    Finally (again, for backwards compatibility), PARAM_DICT can also
    be specified as::
    {'param': PARAM_NAME, 'value': VALUE}
    Note that this format allows only one parameter to be set per step.
    The ``replacement_params`` dict should map parameter names in
    post-job actions (PJAs) to their runtime values. For
    instance, if the final step has a PJA like the following::
    {'RenameDatasetActionout_file1': {'action_arguments': {'newname': '${output}'},
    'action_type': 'RenameDatasetAction',
    'output_name': 'out_file1'}}
    then the following renames the output dataset to 'foo'::
    replacement_params = {'output': 'foo'}
    see also `this email thread
    <http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_.
    .. warning::
    This method waits for the whole workflow to be scheduled before
    returning and does not scale to large workflows as a result. This
    method has therefore been deprecated in favor of
    :meth:`invoke_workflow`, which also features improved default
    behavior for dataset input handling.
    """
    # NOTE(review): json_loads is imported at the top of this module but
    # never used here; dataset_map/params/replacement_params are passed
    # through as raw strings even though bioblend documents them as
    # dicts -- confirm whether they should be parsed with json_loads
    # before the call.
    return ctx.gi.workflows.run_workflow(workflow_id, dataset_map=dataset_map, params=params, history_id=history_id, history_name=history_name, import_inputs_to_history=import_inputs_to_history, replacement_params=replacement_params)
| galaxy-iuc/parsec | parsec/commands/workflows/run_workflow.py | Python | apache-2.0 | 4,029 |
import math
from scipy.optimize import newton
# pi computed as acos(-1); numerically equal to math.pi.
PI = math.acos(-1)
def main() -> None:
    """Solve for t where A*t + B*sin(C*pi*t) = 100 using scipy's newton
    with first and second derivatives supplied (Halley's method)."""
    A, B, C = map(int, input().split())

    def f(t):
        # Root of f is the answer: A*t + B*sin(C*pi*t) - 100 = 0.
        return A * t + B * math.sin(C * t * PI) - 100

    def fprime(t):
        # d/dt of f.
        return A + B * math.cos(C * t * PI) * C * PI

    def fprime2(t):
        # Second derivative of f.
        return -B * math.sin(C * t * PI) * C * PI * C * PI

    # Start from the sine-free estimate t = 100 / A.
    answer = newton(f, x0=100 / A, fprime=fprime, fprime2=fprime2, tol=1e-6)
    print(answer)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| knuu/competitive-programming | atcoder/abc/abc026_d_scipy.py | Python | mit | 630 |
import config
def generate_prefixes():
    '''
    Uses namespaces defined in the config.py file to generate all the
    prefixes you might need in a SPARQL query.
    Returns a string of space-separated PREFIX declarations; the 'root'
    namespace becomes the default (empty) prefix.
    '''
    s = ''
    for key, name in config.namespaces.items():
        # Bug fix: this used `key is 'root'`, an identity comparison with
        # a string literal that depends on interning and raises a
        # SyntaxWarning on modern Python; value equality is what's meant.
        if key == 'root':
            s += 'PREFIX : <{name}> '.format(name=name)
        else:
            s += 'PREFIX {prefix}: <{name}> '.format(prefix=key, name=name)
    return s
| superphy/backend | app/modules/comparisons/sparql_utils.py | Python | apache-2.0 | 467 |
"""
Django settings for tango_with_django project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8t)p*iizkq7z3nmhml!$c$loficpzpin^rk5x%l7l@qln@bnsa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rango',
'registration',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tango_with_django.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'tango_with_django.wsgi.application'
DB_NAME = os.environ.get("DB_NAME")
DB_USER = os.environ.get("DB_USER")
DB_PASSWORD = os.environ.get("DB_PASSWORD")
DB_HOST = os.environ.get("DB_HOST")
DB_PORT = os.environ.get("DB_PORT")
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': DB_NAME,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': DB_HOST,
'PORT': DB_PORT
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Password hashing
# https://docs.djangoproject.com/en/1.10/topics/auth/passwords/
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
)
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
STATICFILES_DIRS = [STATIC_DIR, ]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_root')

# Where @login_required redirects anonymous users.
# (This setting was previously assigned twice - once here and once at the
# very end of the file with the same value; the redundant duplicate has
# been removed.)
LOGIN_URL = '/accounts/login/'

# django-registration-redux settings
REGISTRATION_OPEN = True
ACCOUNT_ACTIVATION_DAYS = 1
REGISTRATION_AUTO_LOGIN = True
LOGIN_REDIRECT_URL = '/rango/'
class AttrDict (dict):
    """Dict whose keys are also readable and writable as attributes.

    ``d.foo`` is equivalent to ``d['foo']``; missing keys raise
    AttributeError (as the attribute protocol requires), not KeyError.
    """

    def __getattr__ (self, k):
        try:
            return self[k]
        # 'except E as e' replaces the Python-2-only 'except E, e' syntax;
        # it is valid on Python 2.6+ and Python 3.
        except KeyError as detail:
            # __getattr__ must raise AttributeError so hasattr()/getattr()
            # defaults keep working.
            raise AttributeError(detail)

    def __setattr__ (self, k, v):
        self[k] = v
| larsks/pydonet | lib/pydonet/utils/attrdict.py | Python | gpl-2.0 | 196 |
import os
import importlib
import logging
import json
import base64
from werkzeug.wrappers import Response
from .response_interface import ResponseInterface
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class ApigatewayResponse(ResponseInterface):
    """Adapt a WSGI application's response to the AWS API Gateway
    Lambda-proxy response format."""

    def run_app(self, app, environ):
        """Run the WSGI *app* against *environ* and return the result as an
        API Gateway proxy-format dict."""
        response = Response.from_app(app, environ)
        return self.translate(response, environ)

    def translate(self, from_data, environ):
        """Translate a werkzeug Response into the API Gateway proxy format.

        Returns a dict with 'body', 'statusCode', 'headers' and, for binary
        payloads, 'isBase64Encoded'. Also emits a request/response log line.
        """
        result = {}
        result['body'] = from_data.data or ''
        result['statusCode'] = from_data._status_code
        result['headers'] = {}
        # .items() iterates identically on Python 2 and 3; the previous
        # .iteritems() call is Python-2 only.
        for key, value in from_data.headers.items():
            result['headers'][key] = value
        settings = importlib.import_module(os.environ["DJANGO_SETTINGS_MODULE"])
        if settings.CORS_ENABLED:
            result['headers']['Access-Control-Allow-Headers'] = 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,x-requested-with'
            result['headers']['Access-Control-Allow-Methods'] = 'DELETE,GET,OPTIONS,PUT,POST'
            result['headers']['Access-Control-Allow-Origin'] = '*'
        # Binary support: API Gateway requires non-text payloads to be
        # base64-encoded and flagged with isBase64Encoded.
        default_mime_type = 'json'
        if not from_data.mimetype.startswith("text/") \
                and default_mime_type not in from_data.mimetype:
            result['body'] = base64.b64encode(result['body'])
            result["isBase64Encoded"] = "true"
        self.response_log(from_data, environ)
        return result

    def response_log(self, response, environ):
        """Log response details for assessment (one plain line + one JSON blob)."""
        log_str = '{} {} {} {}'.format(
            environ['APIGATEWAY_REQUEST_CONTEXT']['httpMethod'],
            environ['PATH_INFO'],
            str(response._status_code),
            environ['APIGATEWAY_REQUEST_CONTEXT']['identity']['userAgent'])
        log_dict = {
            "httpMethod": environ['APIGATEWAY_REQUEST_CONTEXT']['httpMethod'],
            "path": environ['PATH_INFO'],
            "userAgent": environ['APIGATEWAY_REQUEST_CONTEXT']['identity']['userAgent'],
            "stage": environ['APIGATEWAY_REQUEST_CONTEXT']['stage'],
            "responseStatusCode": response._status_code,
            "responseDefaultMimeType": response.default_mimetype,
            "responseMimeType": response.mimetype,
            "responseContentEncoding": response.content_encoding
        }
        logger.info(log_str)
        logger.info(json.dumps(log_dict))
| liangrog/lmdo | lmdo/lmdo_handlers/wsgi/wsgi_apps/response/apigateway_response.py | Python | mit | 2,625 |
import os
from setuptools import setup
from tornadose import __version__
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly (the
    original leaked it), and an explicit UTF-8 encoding so the build does
    not depend on the system locale.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as fh:
        return fh.read()
# Distribution metadata; long_description is sourced from README.rst on disk
# at build time via read() above.
setup(
    name="tornadose",
    version=__version__,
    author="Michael V. DePalatis",
    author_email="mike@depalatis.net",
    description="Tornado-sent events",
    license="MIT",
    long_description=read("README.rst"),
    keywords="tornado web eventsource websockets pubsub",
    url="https://github.com/mivade/tornadose",
    install_requires=["tornado>=5.1"],
    packages=["tornadose"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
    ],
)
| mivade/tornadose | setup.py | Python | mit | 988 |
#! /usr/local/bin/python
"""
Garden health measurement system.
Reads humidity, temperature around a plant and in the soil, writes to db and
takes the picture of the plant.
"""
import time
import os
from datetime import datetime
import sqlalchemy
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
import Adafruit_MCP3008
import picamera
from DHT11_Python.dht11 import DHT11
import RPi.GPIO as GPIO
from plants.models import Plant
from generic_models import Base
from local_settings import DATABASE
class LivingPlantView(object):
    """Plant information processor Factory.

    Reads soil moisture via an MCP3008 ADC, ambient temperature/humidity
    via a DHT11, stores a measurement row through SQLAlchemy and takes a
    photo with the Pi camera.
    """
    # BCM pin the DHT11 temperature/humidity sensor is wired to.
    DHT11_PIN = 17
    # Software SPI configuration #pin:
    CLK = 18
    MISO = 23
    MOSI = 24
    CS = 25
    # Directory where the time-lapse photos are written.
    DIR_NAME = 'grow_motion'
    # ADC calibration: raw reading for dry air vs. fully wet soil.
    dry = 1023
    wet = 600
    normalize_wet = 600
    delta_wet = dry - wet
    # Number of ADC samples averaged per soil measurement.
    average_frequency = 3
    def __init__(self):
        """Create the LivingPlantView: set up the DB session factory and
        the photo output directory."""
        engine = sqlalchemy.create_engine(URL(**DATABASE))
        self.session = sessionmaker(bind=engine)
        # Create table if it doesnt exist.
        Base.metadata.create_all(engine)
        self.model = Plant()
        # Hardware SPI configuration:
        # SPI_PORT = 0
        # SPI_DEVICE = 0
        # mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
        if not os.path.exists(self.DIR_NAME):
            print 'creating {} directory...'.format(self.DIR_NAME)
            os.makedirs(self.DIR_NAME)
    def process_item(self, data):
        """Persist one measurement dict as a Plant row in the database.

        This method is called for every item pipeline component.
        """
        session = self.session()
        obj = Plant(**data)
        try:
            session.add(obj)
            session.commit()
        except:
            # Bare except is tolerable here only because the error is
            # re-raised after rolling the transaction back.
            session.rollback()
            raise
        finally:
            session.close()
        return obj
    def read_values(self):
        """Read all values from every sensor and store one measurement row."""
        # NOTE(review): the local name `time` (a timestamp string) shadows
        # the `time` module within this method.
        avg_humid_percentage, time = self.take_soil_values()
        temperature, humidity = self.take_ambient_values()
        data = dict(
            measured_at=datetime.fromtimestamp(float(time)),
            name=time,
            ambient_temperature=float(temperature) if temperature else None,
            ambient_humidity=float(humidity) if humidity else None,
            soil_humidity=float(avg_humid_percentage)
        )
        self.process_item(data)
    def take_ambient_values(self):
        """
        Read the dht11 module.

        Returns (temperature, humidity), or (None, None) when the sensor
        read fails its checksum.
        The custom dht11 module is on git@github.com:sinanm89/DHT11_Python.git
        """
        instance = DHT11(pin=self.DHT11_PIN)
        result = instance.read()
        if result.is_valid():
            print("Last valid input: " + str(datetime.now()))
            print("Temperature: %d C" % result.temperature)
            print("Humidity: %d %%" % result.humidity)
            return result.temperature, result.humidity
        return None, None
    def take_soil_values(self):
        """
        Read the moisture sensor connected to the MCP3008.

        Averages several ADC samples, normalizes them against the dry/wet
        calibration constants, takes a snapshot and returns
        (moisture_ratio, timestamp_string).
        The important part is mcp.read_adc(channel_number), you can read up to
        8 which is the number of inputs the physical chip has.
        """
        avg_humid = 0
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.cleanup()
        self.mcp = Adafruit_MCP3008.MCP3008(
            clk=self.CLK, cs=self.CS, miso=self.MISO, mosi=self.MOSI)
        for i in range(self.average_frequency):
            # Read all the ADC channel values in a list
            # The read_adc function will get the value of the channel (0-7)
            temp_humid = self.mcp.read_adc(0)
            avg_humid += temp_humid
            time.sleep(0.5)
        # (960 + 962 + 961) / 3
        avg_humid = avg_humid / self.average_frequency
        # 961 - (600)
        avg_humid = avg_humid - self.normalize_wet
        # 361 / 423 = 0.8, so its pretty moist.
        avg_humid_percentage = float(avg_humid) / self.delta_wet
        time.sleep(0.5)
        # NOTE(review): strftime('%s') (epoch seconds) is a platform-specific
        # extension, not portable - confirm it works on the target OS.
        now = datetime.now().strftime('%s')
        pic_name = '{time}_0{humidity}.jpg'.format(
            time=now, humidity=int(avg_humid_percentage * 100))
        try:
            with picamera.PiCamera(resolution=(1920, 1080)) as camera:
                camera.vflip = True
                camera.awb_mode = 'sunlight'
                camera.contrast = 30
                camera.sharpness = 100
                # Give the sensor time to adjust exposure before capturing.
                time.sleep(2)
                camera.capture('{0}/{1}'.format(self.DIR_NAME, pic_name))
        except picamera.exc.PiCameraError:
            print 'camera borked, moving on.'
        except Exception, e:
            print 'WOW FATAL EXCEPTION'
            print e
        print 'Took {0}'.format(pic_name)
        return avg_humid_percentage, now
def main():
    """Main program.

    Poll the sensors every 2 minutes during the plant's "day"
    (hours 5..21) and pause through the night.
    """
    plant = LivingPlantView()
    while True:
        hour_now = datetime.now().hour
        minutes_now = datetime.now().minute
        # plant sleeps 6 hrs
        if 4 < hour_now < 22:
            plant.read_values()
            sleeptime = 2 * 60
            print 'Sleeping for 2 minutes, see you soon ;)'
        else:
            # NOTE(review): the value below is computed in *minutes*
            # ("hours to minutes", "minutes total"), but time.sleep() takes
            # *seconds*, so the overnight pause appears to be 60x shorter
            # than intended - confirm before changing.
            sleeptime = abs(4 - (hour_now - 24)) % 6 # hours to minutes
            sleeptime = 6 if sleeptime == 0 else sleeptime
            sleeptime = (sleeptime * 60) - minutes_now # minutes total
        time.sleep(sleeptime)
if __name__ == '__main__':
main()
| sinanm89/grow_motion | main.py | Python | mit | 5,618 |
from google.appengine.ext import vendor
vendor.add('lib')
from google.appengine.api import urlfetch
urlfetch.set_default_fetch_deadline(60)
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.contrib import appengine
import datetime
import httplib2
import logging
import urllib2
import webapp2
# Silence extra logging from googleapiclient.
discovery.logger.setLevel(logging.WARNING)
KINTARO_HOST = 'kintaro-content-server.appspot.com'
KINTARO_API_ROOT = 'https://{host}/_ah/api'.format(host=KINTARO_HOST)
DISCOVERY_URL = (
KINTARO_API_ROOT + '/discovery/v1/apis/{api}/{apiVersion}/rest')
SCOPE = ('https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/kintaro')
APPID = app_identity.get_application_id()
SERVICE_ACCOUNT_EMAIL = '{}@appspot.gserviceaccount.com'.format(APPID)
def create_service():
    """Build an authenticated Kintaro content-service API client.

    Authenticates as the App Engine service account (see
    SERVICE_ACCOUNT_EMAIL) with the Kintaro + email OAuth scopes and
    discovers the 'content' v1 API from DISCOVERY_URL.
    """
    credentials = appengine.AppAssertionCredentials(SCOPE)
    http = httplib2.Http()
    http = credentials.authorize(http)
    # Force a token fetch up front so auth errors surface here.
    credentials.refresh(http)
    return discovery.build('content', 'v1', http=http,
                           discoveryServiceUrl=DISCOVERY_URL)
class Watch(ndb.Model):
    """Datastore entity describing one Kintaro project to watch.

    When the watched project's modification time advances past `last_run`,
    the configured webhook URL is invoked (see run_webhook).
    """
    last_run = ndb.DateTimeProperty()
    modified = ndb.DateTimeProperty()
    modified_by = ndb.StringProperty()
    project_id = ndb.StringProperty()
    repo_id = ndb.StringProperty()
    request_method = ndb.StringProperty()
    webhook_url = ndb.StringProperty()
    def __repr__(self):
        return '<Watch {}:{}/{}>'.format(
            self.key.id(), self.repo_id, self.project_id)
    def execute(self, kintaro, force=False):
        """Fetch the project from Kintaro and fire the webhook if it changed.

        Updates `modified`/`modified_by`/`last_run` and persists the entity.
        With force=True the webhook runs regardless of timestamps.
        """
        try:
            project = kintaro.projects().rpcGetProject(body={
                'project_id': self.project_id,
            }).execute()
        except errors.HttpError as e:
            # Leave last_run untouched so the next cron tick retries.
            logging.exception('Error fetching -> {}'.format(self))
            return
        self.modified_by = project['mod_info'].get('updated_by')
        self.modified = datetime.datetime.fromtimestamp(
            int(project['mod_info']['updated_on_millis']) / 1000.0)
        if force or self.last_run is None or self.modified > self.last_run:
            if self.webhook_url:
                self.run_webhook(project)
            else:
                logging.info('Skipping (no webhook) -> {}'.format(self))
        else:
            logging.info('Skipping (up-to-date) -> {}'.format(self))
        self.last_run = datetime.datetime.now()
        self.put()
    def run_webhook(self, project):
        """POST an empty JSON body to the (templated) webhook URL."""
        url = self.create_webhook_url(project)
        req = urllib2.Request(url)
        req.add_header('Content-Type', 'application/json')
        resp = urllib2.urlopen(req, '{}')
        logging.info('Webhook run -> {} ({})'.format(self, url))
    def create_webhook_url(self, project):
        """Expand the webhook_url template with this project's metadata."""
        kwargs = {
            'project_created': project['mod_info']['created_on_millis'],
            'project_created_by': project['mod_info']['created_by'],
            'project_id': project['project_id'],
            'project_modified': project['mod_info']['updated_on_millis'],
            'project_modified_by': project['mod_info'].get('updated_by'),
            'repo_id': project['repo_ids'][0],
            'translations_up_to_date': project['translations_up_to_date'],
        }
        return self.webhook_url.format(**kwargs)
def process(force=False):
    """Run every stored Watch against the Kintaro content service.

    With force=True each watch fires its webhook regardless of timestamps.
    """
    kintaro = create_service()
    for watch in Watch.query().fetch():
        watch.execute(kintaro, force=force)
class CronHandler(webapp2.RequestHandler):
    """Cron endpoint (/cron): run watches whose projects changed."""
    def get(self):
        process()
class RunHandler(webapp2.RequestHandler):
    """Manual endpoint (/run): force-run every watch regardless of mtimes."""
    def get(self):
        process(force=True)
class WatchHandler(webapp2.RequestHandler):
    """Index page (/): plain-text listing of the service account to share
    with Kintaro and every registered watch with its webhook URL."""
    def get(self):
        query = Watch.query()
        results = query.fetch()
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write(
            'Share Kintaro with -> {}\n'.format(SERVICE_ACCOUNT_EMAIL))
        for result in results:
            self.response.out.write('{} -> {}\n'.format(result, result.webhook_url))
# URL routing: cron trigger, manual force-run, and the human-readable index.
app = webapp2.WSGIApplication([
    ('/cron', CronHandler),
    ('/run', RunHandler),
    ('/', WatchHandler),
])
| grow/grow-ext-kintaro | watcher/main.py | Python | mit | 4,327 |
''' Plugin for CudaText editor
Authors:
Andrey Kvichansky (kvichans on github.com)
Version:
'2.3.13 2019-05-24'
ToDo: (see end of file)
'''
import re, os, sys, json, collections, itertools, webbrowser, tempfile, html, pickle, time, datetime
from itertools import *
from pathlib import PurePath as PPath
from pathlib import Path as Path
def first_true(iterable, default=False, pred=None):return next(filter(pred, iterable), default) # 10.1.2. Itertools Recipes
import cudatext as app
import cudatext_cmd as cmds
import cudax_lib as apx
from .cd_plug_lib import *
d = dict
class odict(collections.OrderedDict):
    """OrderedDict variant used throughout the plugin.

    Differences from the stock OrderedDict:
      * keyword arguments are accepted and keep their textual order
        (this mattered before Python 3.7 guaranteed kwargs order);
      * str()/repr() render as a compact single-line dict literal.

    Bug fixed: the original __init__ silently *ignored* keyword arguments
    whenever positional arguments were also given; both are now applied
    (positional data first, then keyword additions/overrides).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        if kwargs:
            self.update(kwargs.items())
    def __str__(self):
        return '{%s}' % (', '.join("'%s':%r" % (k,v) for k,v in self.items()))
    def __repr__(self):
        return self.__str__()
pass; LOG = (-1== 1) or apx.get_opt('_opts_dlg_log',False) # Do or dont logging.
pass; from pprint import pformat
pass; pf=lambda d:pformat(d,width=150)
pass; pf80=lambda d:pformat(d,width=80)
pass; pf60=lambda d:pformat(d,width=60)
pass; ##!! waits correction
_ = get_translation(__file__) # I18N
MIN_API_VER = '1.0.168'
MIN_API_VER_4WR = '1.0.175' # vis
MIN_API_VER = '1.0.231' # listview has prop columns
MIN_API_VER = '1.0.236' # p, panel
MIN_API_VER = '1.0.237' # STATUSBAR_SET_CELL_HINT
VERSION = re.split('Version:', __doc__)[1].split("'")[1]
VERSION_V, \
VERSION_D = VERSION.split(' ')
MAX_HIST = apx.get_opt('ui_max_history_edits', 20)
CFG_JSON = app.app_path(app.APP_DIR_SETTINGS)+os.sep+'cuda_options_editor.json'
HTM_RPT_FILE= str(Path(tempfile.gettempdir()) / 'CudaText_option_report.html')
FONT_LST = ['default'] \
+ [font
for font in app.app_proc(app.PROC_ENUM_FONTS, '')
if not font.startswith('@')]
pass; #FONT_LST=FONT_LST[:3]
def load_definitions(defn_path_or_json)->list:
    """ Parse CudaText's default.json (path or raw text) into option infos.

        Return
        [{ opt:'opt name'
        , def:<def val>
        , cmt:'full comment'
        , frm:'bool'|'float'|'int'|'str'| # simple
        'int2s'|'strs'|'str2s'| # list/dict
        'font'|'font-e'| # font non-empty/can-empty
        '#rgb'|'#rgb-e'| # color non-empty/can-empty
        'hotk'|'file'|'json'|
        'unk'
        , lst:[str] for frm==ints
        , dct:[(num,str)] for frm==int2s
        , [(str,str)] for frm==str2s
        , chp:'chapter/chapter'
        , tgs:['tag',]
        }]
    """
    pass; #LOG and log('defn_path_or_json={}',(defn_path_or_json))
    kinfs = []
    lines = defn_path_or_json \
        if str==type(defn_path_or_json) else \
        defn_path_or_json.open(encoding='utf8').readlines()
    # Fast path: the input is already parsed-definition JSON (starts with '[').
    if lines[0][0]=='[':
        # Data is ready - SKIP parsing
        json_bd = defn_path_or_json \
            if str==type(defn_path_or_json) else \
            defn_path_or_json.open(encoding='utf8').read()
        kinfs = json.loads(json_bd, object_pairs_hook=odict)
        for kinf in kinfs:
            pass; #LOG and log('opt in kinf={}',('opt' in kinf))
            if isinstance(kinf['cmt'], list):
                kinf['cmt'] = '\n'.join(kinf['cmt'])
        upd_cald_vals(kinfs, '+def')
        for kinf in kinfs:
            kinf['jdc'] = kinf.get('jdc', kinf.get('dct', []))
            kinf['jdf'] = kinf.get('jdf', kinf.get('def', ''))
        return kinfs
    # Slow path: scan the raw, commented default.json text line by line.
    l = '\n'
    #NOTE: parse_raw
    # Regexes that mine metadata out of the option comments.
    reTags = re.compile(r' *\((#\w+,?)+\)')
    reN2S = re.compile(r'^\s*(\d+): *(.+)' , re.M)
    reS2S = re.compile(r'^\s*"(\w*)": *(.+)' , re.M)
    # reLike = re.compile(r' *\(like (\w+)\)') ##??
    reFldFr = re.compile(r'\s*Folders from: (.+)')
    def parse_cmnt(cmnt, frm):#, kinfs):
        """Extract (#tags), 'N: text'/'"s": text' value maps and
        'Folders from:' directory lists from an option comment.
        Returns (cleaned_cmnt, frm, dct, lst, tags)."""
        tags= set()
        mt = reTags.search(cmnt)
        while mt:
            tags_s = mt.group(0)
            tags |= set(tags_s.strip(' ()').replace('#', '').split(','))
            cmnt = cmnt.replace(tags_s, '')
            mt = reTags.search(cmnt)
        dctN= [[int(m.group(1)), m.group(2).rstrip(', ')] for m in reN2S.finditer(cmnt+l)]
        dctS= [[ m.group(1) , m.group(2).rstrip(', ')] for m in reS2S.finditer(cmnt+l)]
        lstF= None
        mt = reFldFr.search(cmnt)
        if mt:
            from_short = mt.group(1)
            from_dir = from_short if os.path.isabs(from_short) else os.path.join(app.app_path(app.APP_DIR_DATA), from_short)
            pass; #LOG and log('from_dir={}',(from_dir))
            if not os.path.isdir(from_dir):
                log(_('No folder "{}" from\n{}'), from_short, cmnt)
            else:
                lstF = [d for d in os.listdir(from_dir) if os.path.isdir(from_dir+os.sep+d) and d.strip()]
                lstF = sorted(lstF)
                pass; #LOG and log('lstF={}',(lstF))
        frm,\
        lst = ('strs' , lstF) if lstF else \
        (frm , [] )
        frm,\
        dct = ('int2s', dctN) if dctN else \
        ('str2s', dctS) if dctS else \
        (frm , [] )
        return cmnt, frm, dct, lst, list(tags)
    #def parse_cmnt
    # Strip the surrounding quotes and unescape a JSON string literal.
    def jsstr(s):
        return s[1:-1].replace(r'\"','"').replace(r'\\','\\')
    # Line classifiers: section headers, comments, "key": value pairs.
    reChap1 = re.compile(r' *//\[Section: +(.+)\]')
    reChap2 = re.compile(r' *//\[(.+)\]')
    reCmnt = re.compile(r' *//(.+)')
    reKeyDV = re.compile(r' *"(\w+)" *: *(.+)')
    reInt = re.compile(r' *(-?\d+)')
    reFloat = re.compile(r' *(-?\d+\.\d+)')
    reFontNm= re.compile(r'font\w*_name')
    reHotkey= re.compile(r'_hotkey_')
    reColor = re.compile(r'_color$')
    # Scanner state: current chapter, last seen comment, last parsed option.
    chap = ''
    pre_cmnt= ''
    pre_kinf= None
    cmnt = ''
    for line in lines:
        if False:pass
        elif reChap1.match(line):
            mt= reChap1.match(line)
            chap = mt.group(1)
            cmnt = ''
        elif reChap2.match(line):
            mt= reChap2.match(line)
            chap = mt.group(1)
            cmnt = ''
        elif reCmnt.match(line):
            mt= reCmnt.match(line)
            cmnt += l+mt.group(1)
        elif reKeyDV.match(line):
            mt= reKeyDV.match(line)
            key = mt.group(1)
            dval_s = mt.group(2).rstrip(', ')
            # Infer the option's format from the literal form of its default.
            dfrm,dval= \
            ('bool', True ) if dval_s=='true' else \
            ('bool', False ) if dval_s=='false' else \
            ('float',float(dval_s)) if reFloat.match(dval_s) else \
            ('int', int( dval_s)) if reInt.match(dval_s) else \
            ('font', dval_s[1:-1] ) if reFontNm.search(key) else \
            ('hotk', dval_s[1:-1] ) if reHotkey.search(key) else \
            ('#rgb', dval_s[1:-1] ) if reColor.search(key) else \
            ('str', jsstr(dval_s)) if dval_s[0]=='"' and dval_s[-1]=='"' else \
            ('unk', dval_s )
            dfrm,dval=('#rgb-e','' ) if dfrm=='#rgb' and dval=='' else \
            (dfrm, dval )
            pass; #LOG and log('key,dval_s,dfrm,dval={}',(key,dval_s,dfrm,dval))
            # A comment starting with '...' reuses the previous option's format.
            cmnt = cmnt.strip(l) if cmnt else pre_cmnt
            ref_frm = cmnt[:3]=='...'
            pre_cmnt= cmnt if cmnt else pre_cmnt
            pass; #LOG and log('ref_frm,pre_cmnt,cmnt={}',(ref_frm,pre_cmnt,cmnt))
            cmnt = cmnt.lstrip('.'+l)
            dfrm = 'font-e' if dfrm=='font' and 'Empty string is allowed' in cmnt else dfrm
            kinf = odict()
            kinfs += [kinf]
            kinf['opt'] = key
            kinf['def'] = dval
            kinf['cmt'] = cmnt.strip()
            kinf['frm'] = dfrm
            if dfrm in ('int','str'):
                cmnt,frm,\
                dct,lst,tags = parse_cmnt(cmnt, dfrm)#, kinfs)
                kinf['cmt'] = cmnt.strip()
                if frm!=dfrm:
                    kinf['frm'] = frm
                if dct:
                    kinf['dct'] = dct
                if lst:
                    kinf['lst'] = lst
                if tags:
                    kinf['tgs'] = tags
            if dfrm=='font':
                kinf['lst'] = FONT_LST
            if dfrm=='font-e':
                kinf['lst'] = [''] + FONT_LST
            if chap:
                kinf['chp'] = chap
            if ref_frm and pre_kinf:
                # Copy frm data from prev oi
                pass; #LOG and log('Copy frm pre_kinf={}',(pre_kinf))
                kinf[ 'frm'] = pre_kinf['frm']
                if 'dct' in pre_kinf:
                    kinf['dct'] = pre_kinf['dct']
                if 'lst' in pre_kinf:
                    kinf['lst'] = pre_kinf['lst']
            pre_kinf= kinf.copy()
            cmnt = ''
    #for line
    pass; #open(str(defn_path_or_json)+'.p.json', 'w').write(json.dumps(kinfs,indent=2))
    upd_cald_vals(kinfs, '+def')
    for kinf in kinfs:
        kinf['jdc'] = kinf.get('jdc', kinf.get('dct', []))
        kinf['jdf'] = kinf.get('jdf', kinf.get('def', ''))
    return kinfs
#def load_definitions
def load_vals(opt_dfns:list, lexr_json='', ed_=None, full=False, user_json='user.json')->odict:
    """ Create reformated copy (as odict) of
        definitions data opt_dfns (see load_definitions)
        If ed_ then add
        'fval'
        for some options
        If full==True then append optitions without definition
        but only with
        { opt:'opt name'
        , frm:'int'|'float'|'str'
        , uval:<value from user.json>
        , lval:<value from lexer*.json>
        }}
        Return
        {'opt name':{ opt:'opt name', frm:
        ? , def:, cmt:, dct:, chp:, tgs:
        ? , uval:<value from user.json>
        ? , lval:<value from lexer*.json>
        ? , fval:<value from ed>
        }}
    """
    # Resolve the three JSON layers: user settings, per-lexer defaults,
    # and per-lexer user overrides.
    user_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+user_json
    lexr_def_json = apx.get_def_setting_dir() +os.sep+lexr_json
    lexr_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+lexr_json
    user_vals = apx._json_loads(open(user_json , encoding='utf8').read(), object_pairs_hook=odict) \
        if os.path.isfile(user_json) else {}
    lexr_def_vals = apx._json_loads(open(lexr_def_json, encoding='utf8').read(), object_pairs_hook=odict) \
        if os.path.isfile(lexr_def_json) else {}
    lexr_vals = apx._json_loads(open(lexr_json , encoding='utf8').read(), object_pairs_hook=odict) \
        if os.path.isfile(lexr_json) else {}
    pass; #LOG and log('lexr_vals={}',(lexr_vals))
    pass; #LOG and log('lexr_def_vals={}',(lexr_def_vals))
    # Fill vals for defined opt
    pass; #LOG and log('no opt={}',([oi for oi in opt_dfns if 'opt' not in oi]))
    oinf_valed = odict([(oi['opt'], oi) for oi in opt_dfns])
    for opt, oinf in oinf_valed.items():
        if opt in lexr_def_vals: # Correct def-vals for lexer
            oinf['dlx'] = True
            oinf['def'] = lexr_def_vals[opt]
            oinf['jdf'] = oinf['def']
        if opt in user_vals: # Found user-val for defined opt
            oinf['uval'] = user_vals[opt]
        if opt in lexr_vals: # Found lexer-val for defined opt
            oinf['lval'] = lexr_vals[opt]
        if ed_ and opt in apx.OPT2PROP: # Found file-val for defined opt
            fval = ed_.get_prop(apx.OPT2PROP[opt])
            oinf['fval'] =fval
    if full:
        # Append item for non-defined opt
        reFontNm = re.compile(r'font\w*_name')
        # Guess a format tag from a value's Python type (no definition exists).
        def val2frm(val, opt=''):
            pass; #LOG and log('opt,val={}',(opt,val))
            return ('bool' if isinstance(val, bool) else
                'int' if isinstance(val, int) else
                'float' if isinstance(val, float) else
                'json' if isinstance(val, list) or
                isinstance(val, dict) else
                'hotk' if '_hotkey_' in val else
                'font' if isinstance(val, str) and
                reFontNm.search(val) else
                'str')
        for uop,uval in user_vals.items():
            if uop in oinf_valed: continue
            oinf_valed[uop] = odict(
                [ ('opt' ,uop)
                , ('frm' ,val2frm(uval,uop))
                , ('uval' ,uval)
                ]+([('lval' ,lexr_vals[uop])] if uop in lexr_vals else [])
            )
        for lop,lval in lexr_vals.items():
            if lop in oinf_valed: continue
            oinf_valed[lop] = odict(
                [ ('opt' ,lop)
                , ('frm' ,val2frm(lval,lop))
                , ('lval' ,lval)
                ])
    upd_cald_vals(oinf_valed)
    upd_cald_vals(oinf_valed, '+def') if lexr_def_vals else None # To update oi['jdf'] by oi['def']
    return oinf_valed
#def load_vals
def upd_cald_vals(ois, what=''):
    """Refresh derived display fields on option infos.

    '+def' refreshes 'jdc'/'jdf' (value-dict and default annotated with their
    comments); the default/'+clcd' pass refreshes the override marker '!'
    and the annotated user/lexer/file values 'juvl'/'jlvl'/'jfvl'.

    NOTE(review): when *ois* is a dict (as passed from load_vals), the
    '+def' branch iterates option-NAME strings, so `'dct' in oi` is a
    substring test on the name and the branch is effectively a no-op;
    it only does real work for the list form from load_definitions -
    confirm this is intended.
    """
    # Fill calculated attrs
    if '+def' in what:
        for oi in [oi for oi in ois if 'dct' in oi]:
            dct = oi['dct']
            dval= oi['def']
            dc = odict(dct)
            pass; #LOG and log('dct={}',(dct))
            oi['jdc'] = [f('({}) {}', vl, cm ) for vl,cm in dct]
            oi['jdf'] = f('({}) {}', dval, dc[dval])
            pass; #LOG and log('oi={}',(oi))
    # Fill calculated attrs
    if not what or '+clcd' in what:
        for op, oi in ois.items():
            # '!' marks where the effective value comes from:
            # '!' user, '!!' lexer, '!!!' file differs from all layers,
            # '+!'/'+!!' value present without a definition, 'L' lexer default.
            oi['!'] = ('L' if oi.get('dlx') else '') \
                + ('+!!' if 'def' not in oi and 'lval' in oi else
                '+!' if 'def' not in oi and 'uval' in oi else
                '!!!' if 'fval' in oi
                and oi['fval'] != oi.get('lval'
                , oi.get('uval'
                , oi.get( 'def'))) else
                '!!' if 'lval' in oi else
                '!' if 'uval' in oi else
                '')
            dct = odict(oi.get('dct', []))
            oi['juvl'] = oi.get('uval', '') \
                if not dct or 'uval' not in oi else \
                f('({}) {}', oi['uval'], dct[oi['uval']])
            oi['jlvl'] = oi.get('lval', '') \
                if not dct or 'lval' not in oi else \
                f('({}) {}', oi['lval'], dct[oi['lval']])
            oi['jfvl'] = oi.get('fval', '') \
                if not dct or 'fval' not in oi else \
                f('({}) {}', oi['fval'], dct[oi['fval']])
#def upd_cald_vals
#class OptDt:
# """ Options infos to view/change in dlg.
# Opt getting is direct - by fields.
# Opt setting only by methods.
# """
#
# def __init__(self
# , keys_info=None # Ready data
# , path_raw_keys_info='' # default.json
# , path_svd_keys_info='' # To save parsed default.json
# , bk_sets=False # Create backup of settings before the first change
# ):
# self.defn_path = Path(path_raw_keys_info)
# self.bk_sets = bk_sets # Need to backup
# self.bk_files = {} # Created backup files
#
# self.opts_defn = {} # Meta-info for options: format, comment, dict/list of values, chapter, tags
# self.ul_opts = {} # Total options info for user+cur_lexer
# #def __init__
#
# #class OptDt
# Per-column sort state: a pair [type, number].
#   type: _SORT_NO (not sorted), _SORT_DN (descending glyph), _SORT_UP
#   number: position of the column in the multi-key sort order (-1 = unused)
_SORT_NO = -1
_SORT_DN = 0
_SORT_UP = 1
_SORT_TSGN = {_SORT_NO: '', _SORT_UP: '↑', _SORT_DN: '↓'}
_SORT_NSGN = {-1: '', 0: '', 1: '²', 2: '³'}
_SORT_NSGN.update({n: str(n + 1) for n in range(3, 10)})

def _sort_pfx(to, num):
    # Header prefix: direction glyph + ordinal glyph + trailing space.
    return '' if to == _SORT_NO else _SORT_TSGN[to] + _SORT_NSGN[num] + ' '

def _next_sort(to):
    # Cycle NO -> DN -> UP -> NO.
    return ((to + 2) % 3) - 1

def _inve_sort(to):
    # Flip DN <-> UP.
    return 1 - to

def sorts_dflt(cols):
    # Fresh "no sorting" state for `cols` columns.
    return [[_SORT_NO, -1] for _ in range(cols)]

def sorts_sign(sorts, col):
    return _sort_pfx(sorts[col][0], sorts[col][1])

def sorts_on(sorts, col):
    return sorts[col][0] != _SORT_NO

def sorts_turn(sorts, col, scam=''):
    """ Switch one of sorts """
    hi_num = max(num for _, num in sorts)
    cur = sorts[col]
    if 'c' == scam:
        if cur[1] == hi_num:
            # Ctrl-click on the most recently added sort key: cycle its type.
            cur[0] = _next_sort(cur[0])
            if cur[0] == _SORT_NO:
                cur[1] = -1
        elif cur[1] == -1:
            # Ctrl-click on an unsorted column: append it as a new sort key.
            cur[0] = _next_sort(cur[0])
            cur[1] = hi_num + 1
        else:
            # Ctrl-click on an older sort key: just flip its direction.
            cur[0] = _inve_sort(cur[0])
    else:
        # Plain click: this column becomes the only sort key.
        for c, state in enumerate(sorts):
            state[0], state[1] = (_next_sort(cur[0]), 0) if c == col else (_SORT_NO, -1)
    return sorts
#def sorts_turn
def sorts_sort(sorts, tdata):
    """ Sort tdata (must contain only str) by sorts.

    Repeatedly stable-sorts rows by the accumulated key columns, so the
    sort key numbered 0 ends up as the primary key.  Descending order is
    implemented by mapping each character to its codepoint complement.
    """
    pass; #log('tdata={}',(tdata))
    pass; #log('sorts={}',(sorts))
    max_num = max(tn[1] for tn in sorts)
    if -1==max_num: return tdata
    def push(lst, v):
        lst.append(v)
        return lst
    # Build a comparable key string: empty cells sink to the bottom; for
    # inverted (ascending-glyph) columns every codepoint is complemented
    # against 0x10FFFF so a plain ascending sort yields descending order.
    prep_str = lambda s,inv: (chr(0x10FFFF) # To move empty to bottom
                if not s else
                s
                if not inv else
                ''.join(chr(0x10FFFF - ord(c)) for c in s) # 0x10FFFF from chr() doc
                )
    td_keys = [[r] for r in tdata]
    for srt_n in range(1+max_num):
        # Find the column whose sort-order number is srt_n.
        srt_ctn = first_true(((c,tn) for c,tn in enumerate(sorts)), None
                ,lambda ntn: ntn[1][1]==srt_n)
        assert srt_ctn is not None
        srt_c = srt_ctn[0]
        inv = srt_ctn[1][0]==_SORT_UP
        td_keys = [push(r, prep_str(r[0][srt_c], inv)) for r in td_keys]
        td_keys.sort(key=lambda r: r[1:])
    tdata = [r[0] for r in td_keys] # Remove appended cols
    return tdata
#def sorts_sort
class OptEdD:
SCROLL_W= app.app_proc(app.PROC_GET_GUI_HEIGHT, 'scrollbar') if app.app_api_version()>='1.0.233' else 15
COL_SEC = 0
COL_NAM = 1
COL_OVR = 2
COL_DEF = 3
COL_USR = 4
COL_LXR = 5
COL_FIL = 6
COL_LEXR= _('Lexer')
COL_FILE= _('File "{}"')
COL_NMS = (_('Section'), _('Option'), '!', _('Default'), ('User'), COL_LEXR, COL_FILE)
COL_MWS = [ 70, 210, 25, 120, 120, 70, 50] # Min col widths
# COL_MWS = [ 70, 150, 25, 120, 120, 70, 50] # Min col widths
COL_N = len(COL_MWS)
CMNT_MHT= 60 # Min height of Comment
STBR_FLT= 10
STBR_ALL= 11
STBR_MSG= 12
STBR_H = apx.get_opt('ui_statusbar_height',24)
FILTER_C= _('&Filter')
NO_CHAP = _('_no_')
CHPS_H = f(_('Choose section to append in "{}".'
'\rHold Ctrl to add several sections.'
), FILTER_C).replace('&', '')
FLTR_H = _('Suitable options will contain all specified words.'
'\r Tips and tricks:'
'\r • Add "#" to search the words also in comments.'
'\r • Add "@sec" to show options from section with "sec" in name.'
'\r Several sections are allowed.'
'\r Click item in menu "Section..." with Ctrl to add it.'
'\r • To show only overridden options:'
'\r - Add "!" to show only User+Lexer+File.'
'\r - Add "!!" to show only Lexer+File'
'\r - Add "!!!" to show only File.'
'\r • Use "<" or ">" for word boundary.'
'\r Example: '
'\r size> <tab'
'\r selects "tab_size" but not "ui_tab_size" or "tab_size_x".'
'\r • Alt+L - Clear filter')
LOCV_C = _('Go to "{}" in user/lexer config file')
LOCD_C = _('Go to "{}" in default config file')
OPME_H = _('Edit JSON value')
TOOP_H = f(_('Close dialog and open user/lexer settings file'
'\rto edit the current option.'
'\rSee also menu command'
'\r {}'), f(LOCD_C, '<option>'))
LIFL_C = _('Instant filtering')
FULL_C = _('Show &all keys in user/lexer configs')
@staticmethod
def prep_sorts(sorts):
M = OptEdD
if len(sorts)==len(M.COL_NMS):
return sorts
return sorts_dflt(len(M.COL_NMS))
def __init__(self
, path_keys_info ='' # default.json or parsed data (file or list_of_dicts)
, subset ='' # To get/set from/to cuda_options_editor.json
, how ={} # Details to work
):
M,m = self.__class__,self
m.ed = ed
m.how = how
m.defn_path = Path(path_keys_info) if str==type(path_keys_info) else json.dumps(path_keys_info)
m.subset = subset
m.stores = get_hist('dlg'
, json.loads(open(CFG_JSON).read(), object_pairs_hook=odict)
if os.path.exists(CFG_JSON) else odict())
pass; #LOG and log('ok',())
# m.bk_sets = m.stores.get(m.subset+'bk_sets' , False)
m.lexr_l = app.lexer_proc(app.LEXER_GET_LEXERS, False)
m.lexr_w_l = [f('{} {}'
,'!!' if os.path.isfile(app.app_path(app.APP_DIR_SETTINGS)+os.sep+'lexer '+lxr+'.json') else ' '
, lxr)
for lxr in m.lexr_l]
m.cur_op = m.stores.get(m.subset+'cur_op' , '') # Name of current option
m.col_ws = m.stores.get(m.subset+'col_ws' , M.COL_MWS[:])
m.col_ws = m.col_ws if M.COL_N==len(m.col_ws) else M.COL_MWS[:]
m.h_cmnt = m.stores.get(m.subset+'cmnt_heght', M.CMNT_MHT)
m.sorts = m.stores.get(m.subset+'sorts' , [] ) # Def sorts is no sorts
m.live_fltr = m.stores.get(m.subset+'live_fltr' , False) # To filter after each change and no History
m.cond_hl = [s for s in m.stores.get(m.subset+'h.cond', []) if s] if not m.live_fltr else []
m.cond_s = '' if M.restart_cond is None else M.restart_cond # String filter
m.ops_only = [] # Subset to show (future)
m.sorts = M.prep_sorts(m.sorts)
m.lexr = m.ed.get_prop(app.PROP_LEXER_CARET)
m.all_ops = m.stores.get(m.subset+'all_ops' , False) # Show also options without definition
m.opts_defn = {} # Meta-info for options: format, comment, dict of values, chapter, tags
m.opts_full = {} # Show all options
m.chp_tree = {} # {'Ui':{ops:[], 'kids':{...}, 'path':'Ui/Tabs'}
m.pth2chp = {} # path-index for m.chp_tree
# Cache
m.SKWULFs = [] # Last filtered+sorted
m.cols = [] # Last info about listview columns
m.itms = [] # Last info about listview cells
# m.bk_files = {}
# m.do_file('backup-user') if m.bk_sets else 0
m.do_file('load-data')
m.for_ulf = 'u' # 'u' for User, 'l' for Lexer, 'f' for File
m.cur_op = m.cur_op if m.cur_op in m.opts_full else '' # First at start
m.cur_in = 0 if m.cur_op else -1
m.stbr = None # Handle for statusbar_proc
m.locate_on_exit = None
m.chng_rpt = [] # Report of all changes by user
m.apply_one = m.stores.get(m.subset+'apply_one', False) # Do one call OpsReloadAndApply on exit
m.apply_need= False # Need to call OpsReloadAndApply
m.auto4file = m.stores.get(m.subset+'auto4file', True) # Auto reset file value to over value def/user/lex
#def __init__
def stbr_act(self, tag=None, val='', opts={}):
    """Write str(val) into the statusbar cell identified by tag.

    Quietly does nothing while the statusbar handle (m.stbr) is not yet created.
    """
    M,m = self.__class__,self
    if m.stbr:
        app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_TEXT, tag=tag, value=str(val))
    #def stbr_act
def do_file(self, what, data='', opts={}):
    """ Dispatch a file-level action selected by `what`.

        what='load-data'        (Re)load option definitions and values, then rebuild chapter tree.
        what='build-chp-tree'   Rebuild m.chp_tree and m.pth2chp from chapters in m.opts_full.
        what='locate_to'        Open file data['path'] and search data['find'] inside it.
        what in ('locate-def','locate-opt','goto-def','goto-opt')
                                Locate current option in defaults file or in user/lexer config;
                                'goto-*' only stores the target in m.locate_on_exit and
                                returns True to ask the caller to close the dialog.
        what='set-lexr'         Reload values after the target lexer was changed.
        what='out-rprt'         Create HTML report and open it in a browser.

        Return value depends on the branch: bool, [] or dict with ctrls to refresh.
    """
    M,m = self.__class__,self
    if False:pass
    elif what=='load-data':
        pass; #LOG and log('',)
        m.opts_defn = load_definitions(m.defn_path)
        pass; #LOG and log('m.opts_defn={}',pf([o for o in m.opts_defn]))
        pass; #LOG and log('m.opts_defn={}',pf([o for o in m.opts_defn if '2s' in o['frm']]))
        m.opts_full = load_vals(m.opts_defn
                               ,lexr_json='lexer '+m.lexr+'.json'
                               ,user_json=m.how.get('stor_json', 'user.json')
                               , ed_=m.ed, full=m.all_ops)
        # Current option may vanish after reload - drop it then
        m.cur_op = m.cur_op if m.cur_op in m.opts_full else ''
        pass; #LOG and log('m.opts_full={}',pf(m.opts_full))
        m.do_file('build-chp-tree')
    elif what=='build-chp-tree':
        # Build chapter tree
        m.chp_tree = odict(ops=list(m.opts_full.keys())
                          ,kids=odict()
                          ,path='') # {chp:{ops:[], kids:{...}, path:'c1/c2'}
        m.pth2chp = {} # {path:chp}
        for op,oi in m.opts_full.items():
            chp_s = oi.get('chp', M.NO_CHAP)
            chp_s = chp_s if chp_s else M.NO_CHAP
            chp_node= m.chp_tree # Start root to move
            kids = chp_node['kids']
            path =''
            for chp in chp_s.split('/'):
                # Move along branch and create nodes if need
                chp_node = kids.setdefault(chp, odict())
                path += ('/'+chp) if path else chp
                chp_node['path']= path
                m.pth2chp[path] = chp_node
                ops_l = chp_node.setdefault('ops', [])
                ops_l += [op]
                if not ('/'+chp_s).endswith('/'+chp): # not last
                    kids = chp_node.setdefault('kids', odict())
        pass; #LOG and log('m.chp_tree=¶{}',pf60(m.chp_tree))
        pass; #LOG and log('m.pth2chp=¶{}',pf60(m.pth2chp))
    elif what == 'locate_to':
        to_open = data['path']
        find_s = data['find']
        app.file_open(to_open) ##!!
        pass; #log('to_open={}',(to_open))
        pass; #log('ed.get_filename()={}',(ed.get_filename()))
        m.ag.opts['on_exit_focus_to_ed'] = ed
        # Locate: save user's finder options, run findnext, then restore them
        user_opt= app.app_proc(app.PROC_GET_FINDER_PROP, '') \
                    if app.app_api_version()>='1.0.248' else \
                  app.app_proc(app.PROC_GET_FIND_OPTIONS, '') # Deprecated
        pass; #log('ed_to_fcs.get_filename()={}',(ed_to_fcs.get_filename()))
        pass; #log('ed.get_filename()={}',(ed.get_filename()))
        pass; #LOG and log('find_s={!r}',(find_s))
        ed.cmd(cmds.cmd_FinderAction, chr(1).join(['findnext', find_s, '', 'fa'])) # f - From-caret, a - Wrap
        if app.app_api_version()>='1.0.248':
            app.app_proc(app.PROC_SET_FINDER_PROP, user_opt)
        else:
            app.app_proc(app.PROC_SET_FIND_OPTIONS, user_opt) # Deprecated
    elif what in ('locate-def', 'locate-opt', 'goto-def', 'goto-opt', ):
        if not m.cur_op:
            m.stbr_act(M.STBR_MSG, _('Choose option to find in config file'))
            return False
        oi = m.opts_full[m.cur_op]
        pass; #LOG and log('m.cur_op,oi={}',(m.cur_op,oi))
        to_open = ''
        if what in ('locate-opt', 'goto-opt'):
            # Locate in user.json or "lexer NNN.json" - only if value exists there
            if 'uval' not in oi and m.for_ulf=='u':
                m.stbr_act(M.STBR_MSG, f(_('No user value for option "{}"'), m.cur_op))
                return False
            if 'lval' not in oi and m.for_ulf=='l':
                m.stbr_act(M.STBR_MSG, f(_('No lexer "{}" value for option "{}"'), m.lexr, m.cur_op))
                return False
            to_open = 'lexer '+m.lexr+'.json' if m.for_ulf=='l' else 'user.json'
            to_open = app.app_path(app.APP_DIR_SETTINGS)+os.sep+to_open
        else:
            # Locate in the definitions file - only if a default exists
            if 'def' not in oi:
                m.stbr_act(M.STBR_MSG, f(_('No default for option "{}"'), m.cur_op))
                return False
            to_open = str(m.defn_path)
        if not os.path.exists(to_open):
            log('No file={}',(to_open))
            return False
        find_s = f('"{}"', m.cur_op)
        if what in ('goto-def', 'goto-opt'):
            # Defer the jump until dialog close (handled in show()'s when_exit)
            m.locate_on_exit = d(path=to_open, find=find_s)
            return True #
        m.do_file('locate_to', d(path=to_open, find=find_s))
        return False
   #elif what=='set-dfns':
   #    m.defn_path = data
   #    m.do_file('load-data')
   #    return d(ctrls=odict(m.get_cnts('lvls')))
    elif what=='set-lexr':
        m.opts_full = load_vals(m.opts_defn
                               ,lexr_json='lexer '+m.lexr+'.json'
                               ,user_json=m.how.get('stor_json', 'user.json')
                               ,ed_=m.ed, full=m.all_ops)
        return d(ctrls=odict(m.get_cnts('lvls')))
    elif what=='out-rprt':
        if do_report(HTM_RPT_FILE, 'lexer '+m.lexr+'.json', m.ed):
            webbrowser.open_new_tab('file://' +HTM_RPT_FILE)
            app.msg_status('Opened browser with file '+HTM_RPT_FILE)
        return []
    #def do_file
def _prep_opt(self, opts='', ind=-1, nm=None):
    """ Prepare vars to show info about current option by
            m.cur_op
            m.lexr
        Service modes (early return):
            opts='key2ind'  map option name (nm or m.cur_op) to row index in m.SKWULFs
            opts='ind2key'  map row index (ind or listview selection) to option name
            opts='fid4ed'   choose the control id that edits the current option's format
        Otherwise return tuple
            {} vi-attrs     visibility flags for the value editors
            {} en-attrs     enabled flags for controls
            {} val-attrs    values for editors
            {} items-attrs  items for comboboxes
            {} color-attrs  colors for #rgb swatches
    """
    M,m = self.__class__,self
    if opts=='key2ind':
        opt_nm = nm if nm else m.cur_op
        m.cur_in= index_1([m.SKWULFs[row][1] for row in range(len(m.SKWULFs))], opt_nm, -1)
        return m.cur_in
    if opts=='ind2key':
        opt_in = ind if -1!=ind else m.ag.cval('lvls')
        m.cur_op= m.SKWULFs[opt_in][1] if -1<opt_in<len(m.SKWULFs) else ''
        return m.cur_op
    if opts=='fid4ed':
        if not m.cur_op: return 'lvls'
        frm = m.opts_full[m.cur_op]['frm']
        # FIX: ('json',) is a one-element tuple; bare ('json') was just the string
        #      'json', turning the test into a substring check
        fid = 'eded' if frm in ('str', 'int', 'float') else \
              'edcb' if frm in ('int2s', 'str2s', 'strs', 'font', 'font-e') else \
              'edrf' if frm in ('bool',) else \
              'brow' if frm in ('hotk', 'file', '#rgb', '#rgb-e') else \
              'opjs' if frm in ('json',) else \
              'lvls'
        pass; #LOG and log('m.cur_op,frm,fid={}',(m.cur_op,frm,fid))
        return fid
    pass; #LOG and log('m.cur_op, m.lexr={}',(m.cur_op, m.lexr))
    vis,ens,vas,its,bcl = {},{},{},{},{}
    vis['edcl'] = vis['dfcl'] = False
    bcl['edcl'] = bcl['dfcl'] = 0x20000000
   # bcl['eded'] = bcl['dfvl'] = 0x20000000
    ens['eded'] = ens['setd'] = False # All un=F
    vis['eded'] = vis['edcb']=vis['edrf']=vis['edrt']=vis['brow']=vis['toop']=vis['opjs'] = False # All vi=F
    vas['eded'] = vas['dfvl']=vas['cmnt']= '' # All ed empty
    vas['edcb'] = -1
    vas['edrf'] = vas['edrt'] = False
    its['edcb'] = []
    ens['dfvl'] = True
    ens['tofi'] = m.cur_op in apx.OPT2PROP
    if m.for_ulf=='l' and m.lexr not in m.lexr_l:
        # Not selected lexer
        vis['eded'] = True
        ens['dfvl'] = False
        return vis,ens,vas,its,bcl
    if m.for_ulf=='f' and m.cur_op not in apx.OPT2PROP:
        # No the option for File
        vis['eded'] = True
        ens['dfvl'] = False
        return vis,ens,vas,its,bcl
    if not m.cur_op:
        # No current option
        vis['eded'] = True
    else:
        # Current option
        oi = m.opts_full[m.cur_op]
        pass; #LOG and log('oi={}',(oi))
        vas['dfvl'] = str(oi.get('jdf' , '')).replace('True', 'true').replace('False', 'false')
        vas['uval'] = oi.get('uval', '')
        vas['lval'] = oi.get('lval', '')
        vas['fval'] = oi.get('fval', '')
        vas['cmnt'] = oi.get('cmt' , '')
        frm = oi['frm']
        # Value for current "For user/lexer/file" target
        ulfvl_va = vas['fval'] \
                    if m.for_ulf=='f' else \
                   vas['lval'] \
                    if m.for_ulf=='l' else \
                   vas['uval'] # Cur val with cur state of "For lexer"
        ens['eded'] = frm not in ('json', 'hotk', 'file')#, '#rgb', '#rgb-e')
        ens['setd'] = frm not in ('json',) and ulfvl_va is not None
        if False:pass
        elif frm in ('json',):      # FIX: was ('json') - a plain string
            # vis['toop'] = True
            vis['opjs'] = True
            vis['eded'] = True
            vas['eded'] = str(ulfvl_va)
        elif frm in ('str', 'int', 'float'):
            vis['eded'] = True
            vas['eded'] = str(ulfvl_va)
        elif frm in ('hotk', 'file', '#rgb', '#rgb-e'):
            vis['eded'] = True
            vis['brow'] = True
            vas['eded'] = str(ulfvl_va)
            vis['edcl'] = frm in ('#rgb', '#rgb-e')
            vis['dfcl'] = frm in ('#rgb', '#rgb-e')
            bcl['edcl'] = apx.html_color_to_int(ulfvl_va ) if frm in ('#rgb', '#rgb-e') and ulfvl_va else 0x20000000
            bcl['dfcl'] = apx.html_color_to_int(vas['dfvl'] ) if frm in ('#rgb', '#rgb-e') and vas['dfvl'] else 0x20000000
        elif frm in ('bool',):
            vis['edrf'] = True
            vis['edrt'] = True
            vas['edrf'] = ulfvl_va==False
            vas['edrt'] = ulfvl_va==True
        elif frm in ('int2s', 'str2s'):
            vis['edcb'] = True
            ens['edcb'] = True
            its['edcb'] = oi['jdc']
            vas['edcb'] = index_1([k for (k,v) in oi['dct']], ulfvl_va, -1)
            pass; #LOG and log('ulfvl_va, vas[edcb]={}',(ulfvl_va,vas['edcb']))
        elif frm in ('strs','font','font-e'):
            vis['edcb'] = True
            ens['edcb'] = True
            its['edcb'] = oi['lst']
            vas['edcb'] = index_1(oi['lst'], ulfvl_va, -1)
            pass; #LOG and log('ulfvl_va={}',(ulfvl_va))
    pass; #LOG and log('vis={}',(vis))
    pass; #LOG and log('ens={}',(ens))
    pass; #LOG and log('vas={}',(vas))
    pass; #LOG and log('its={}',(its))
    return vis,ens,vas,its,bcl
    #def _prep_opt
def show(self
    , title # For cap of dlg
    ):
    """ Build and run the modal options dialog.

        Creates the DlgAgent with all controls, wires the statusbar cells,
        runs the dialog, then persists UI state (current option, column
        widths, sorts, filter history) into m.stores.

        Returns True if the backing settings file (user.json or the custom
        stor_json) was modified while the dialog was open (mtime changed).
    """
    M,m = self.__class__,self
    def when_exit(ag):
        # On close: remember layout, apply deferred changes, do deferred "goto"
        pass; #LOG and log('',())
        pass; #pr_ = dlg_proc_wpr(ag.id_dlg, app.DLG_CTL_PROP_GET, name='edch')
        pass; #log('exit,pr_={}',('edch', {k:v for k,v in pr_.items() if k in ('x','y')}))
        pass; #log('cols={}',(ag.cattr('lvls', 'cols')))
        m.col_ws= [ci['wd'] for ci in ag.cattr('lvls', 'cols')]
        m.stores[m.subset+'cmnt_heght'] = m.ag.cattr('cmnt', 'h')
        if m.apply_one and m.apply_need:
            ed.cmd(cmds.cmd_OpsReloadAndApply)
        if m.locate_on_exit:
            m.do_file('locate_to', m.locate_on_exit)
        #def when_exit
    repro_py = apx.get_opt('dlg_cuda_options.repro_py') # 'repro_dlg_opted.py'
    m.dlg_min_w = 10 + sum(M.COL_MWS) + M.COL_N + M.SCROLL_W
    m.dlg_w = 10 + sum(m.col_ws) + M.COL_N + M.SCROLL_W
    m.dlg_h = 380 + m.h_cmnt +10 + M.STBR_H
   # m.dlg_h = 270 + m.h_cmnt +10 + M.STBR_H
    pass; #log('m.dlg_w,m.dlg_h={}',(m.dlg_w,m.dlg_h))
    m.ag = DlgAgent(
        form =dict(cap = title + f(' ({})', VERSION_V)
                  ,resize = True
                  ,w = m.dlg_w ,w_min=m.dlg_min_w
                  ,h = m.dlg_h
                  ,on_resize=m.do_resize
                  )
        , ctrls=m.get_cnts()
        , vals =m.get_vals()
        , fid ='cond'
        ,options = ({
            'gen_repro_to_file':repro_py, #NOTE: repro
            } if repro_py else {})
        )
    # Select on pre-show. Reason: linux skip selection event after show
    m.ag._update_on_call(m.do_sele('lvls', m.ag))
    # Statusbar: "all options" cell, "shown options" cell, stretchable message cell
    m.stbr = app.dlg_proc(m.ag.id_dlg, app.DLG_CTL_HANDLE, name='stbr')
    app.statusbar_proc(m.stbr, app.STATUSBAR_ADD_CELL , tag=M.STBR_ALL)
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_SIZE , tag=M.STBR_ALL, value=40)
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_ALIGN , tag=M.STBR_ALL, value='R')
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_HINT , tag=M.STBR_ALL, value=_('Number of all options'))
    app.statusbar_proc(m.stbr, app.STATUSBAR_ADD_CELL , tag=M.STBR_FLT)
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_SIZE , tag=M.STBR_FLT, value=40)
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_ALIGN , tag=M.STBR_FLT, value='R')
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_HINT , tag=M.STBR_FLT, value=_('Number of shown options'))
    app.statusbar_proc(m.stbr, app.STATUSBAR_ADD_CELL , tag=M.STBR_MSG)
    app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_AUTOSTRETCH , tag=M.STBR_MSG, value=True)
    m.stbr_act(M.STBR_ALL, len(m.opts_full))
    m.stbr_act(M.STBR_FLT, len(m.opts_full))
    # Remember mtime of the values file to detect outside changes on exit
    stor_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+m.how.get('stor_json', 'user.json')
    start_mtime = os.path.getmtime(stor_json) if os.path.exists(stor_json) else 0
    m.ag.show(when_exit)
    m.ag = None
    # Save for next using
    m.stores[m.subset+'cur_op'] = m.cur_op
    m.stores[m.subset+'col_ws'] = m.col_ws
    m.stores[m.subset+'sorts'] = m.sorts
    if not m.live_fltr:
        m.stores[m.subset+'h.cond'] = m.cond_hl
    m.stores[m.subset+'all_ops'] = m.all_ops
    set_hist('dlg', m.stores)
    return start_mtime != (os.path.getmtime(stor_json) if os.path.exists(stor_json) else 0)
    #def show
def get_cnts(self, what=''):
    """ Build dialog controls description for DlgAgent.

        what=''                   - full set of controls (returned as odict)
        what with '+cond'         - filter combobox only
        what with '+cols'/'=cols' - listview columns only
        what with '+lvls'/'=lvls' - listview columns+items ('+lvls' refilters data)
        what with '+cur'          - controls editing the current option's value
        Partial requests return a list of (cid, props) pairs.
    """
    M,m = self.__class__,self
    reNotWdChar = re.compile(r'\W')
    def test_fltr(fltr_s, op, oi):
        """True if option op (info dict oi) matches filter string fltr_s.
           '!'/'!!'/'!!!' require the same marks in oi['!'];
           '#' also searches the comment; '<'/'>' anchor word boundaries."""
        if not fltr_s: return True
        pass; #LOG and log('fltr_s, op, oi[!]={}',(fltr_s, op, oi['!']))
        if '!!!' in fltr_s and '!!!' not in oi['!']: return False
        if '!!' in fltr_s and '!!' not in oi['!']: return False
        pass; #LOG and log('skip !!',())
        if '!' in fltr_s and '!' not in oi['!']: return False
        pass; #LOG and log('skip !',())
        text = op \
             + (' '+oi.get('cmt', '') if '#' in fltr_s else '')
        text = text.upper()
        fltr_s = fltr_s.replace('!', '').replace('#', '').upper()
        if '<' in fltr_s or '>' in fltr_s:
            # '·' marks each non-word char so '<'/'>' can match word edges
            text = '·' + reNotWdChar.sub('·', text) + '·'
            fltr_s = ' ' + fltr_s + ' '
            fltr_s = fltr_s.replace(' <', ' ·').replace('> ', '· ')
            pass; #LOG and log('fltr_s, text={}',(fltr_s, text))
        return all(map(lambda c:c in text, fltr_s.split()))
        #def test_fltr
    def get_tbl_cols(sorts, col_ws):
        """Describe listview columns: names with sort marks, widths, alignment."""
        cnms = list(M.COL_NMS)
        cnms[M.COL_FIL] = f(cnms[M.COL_FIL], m.ed.get_prop(app.PROP_TAB_TITLE))
        cols = [d(nm=sorts_sign(sorts, c) + cnms[c]
                 ,wd=col_ws[c]
                 ,mi=M.COL_MWS[c]
                 ) for c in range(M.COL_N)]
        cols[M.COL_OVR]['al'] = 'C'
        if m.how.get('hide_fil', False):
            pos_fil = M.COL_NMS.index(M.COL_FILE)
            cols[pos_fil]['vi'] = False
        if m.how.get('hide_lex_fil', False):
            pos_lex = M.COL_NMS.index(M.COL_LEXR)
            pos_fil = M.COL_NMS.index(M.COL_FILE)
            cols[pos_lex]['vi'] = False
            cols[pos_fil]['vi'] = False
        return cols
        #def get_tbl_cols
    def get_tbl_data(opts_full, cond_s, ops_only, sorts, col_ws):
        """Filter and sort option rows; return (rows, columns, listview items)."""
        # Filter table data
        pass; #LOG and log('cond_s={}',(cond_s))
        pass; #log('opts_full/tab_s={}',({o:oi for o,oi in opts_full.items() if o.startswith('tab_s')}))
        chp_cond = ''
        chp_no_c = False
        if '@' in cond_s:
            # Prepare to match chapters
            chp_cond = ' '.join([mt.group(1) for mt in re.finditer(r'@([\w/]+)' , cond_s)]).upper() # @s+ not empty chp
            chp_cond = chp_cond.replace(M.NO_CHAP.upper(), '').strip()
            chp_no_c = '@'+M.NO_CHAP in cond_s
            cond_s = re.sub( r'@([\w/]*)', '', cond_s) # @s* clear @ and cph
            pass; #log('chp_cond, chp_no_c, cond_s={}',(chp_cond, chp_no_c, cond_s))
        SKWULFs = [ (oi.get('chp','')
                    ,op
                    ,oi['!']
                    ,str(oi.get('jdf' ,'')).replace('True', 'true').replace('False', 'false')
                    ,str(oi.get('juvl','')).replace('True', 'true').replace('False', 'false')
                    ,str(oi.get('jlvl','')).replace('True', 'true').replace('False', 'false')
                    ,str(oi.get('jfvl','')).replace('True', 'true').replace('False', 'false')
                    ,oi['frm']
                    )
                    for op,oi in opts_full.items()
                   # if (not chp_cond or chp_cond in oi.get('chp', '').upper())
                    if (not chp_cond or any((chp_cond in oi.get('chp', '').upper()) for chp_cond in chp_cond.split()))
                    and (not chp_no_c or not oi.get('chp', ''))
                    and (not cond_s or test_fltr(cond_s, op, oi))
                    and (not ops_only or op in ops_only)
                  ]
        # Sort table data
        SKWULFs = sorts_sort(sorts, SKWULFs)
        # Fill table
        pass; #LOG and log('M.COL_NMS,col_ws,M.COL_MWS={}',(len(M.COL_NMS),len(col_ws),len(M.COL_MWS)))
        cols = get_tbl_cols(sorts, col_ws)
        itms = (list(zip([_('Section'),_('Option'), '', _('Default'), _('User'), _('Lexer'), _('File')], map(str, col_ws)))
              #, [ (str(n)+':'+sc,k ,w ,dv ,uv ,lv ,fv) # for debug
              #, [ (sc+' '+fm ,k ,w ,dv ,uv ,lv ,fv) # for debug
               , [ (sc ,k ,w ,dv ,uv ,lv ,fv) # for user
                   for n,( sc ,k ,w ,dv ,uv ,lv ,fv, fm) in enumerate(SKWULFs) ]
               )
        return SKWULFs, cols, itms
        #def get_tbl_data
    if not what or '+lvls' in what:
        m.SKWULFs,\
        m.cols ,\
        m.itms = get_tbl_data(m.opts_full, m.cond_s, m.ops_only, m.sorts, m.col_ws)
        if 'stbr' in dir(m):
            m.stbr_act(M.STBR_FLT, len(m.SKWULFs))
    if '+cols' in what:
        pass; #LOG and log('m.col_ws={}',(m.col_ws))
        m.cols = get_tbl_cols(m.sorts, m.col_ws)
        pass; #LOG and log('m.cols={}',(m.cols))
    # Prepare [Def]Val data by m.cur_op
    vis,ens,vas,its,bcl = m._prep_opt()
    ed_s_c = _('>Fil&e:') if m.for_ulf=='f' else \
             _('>L&exer:') if m.for_ulf=='l' else \
             _('>Us&er:')
    cnts = []
    if '+cond' in what:
        cnts += [0
            ,('cond',d(items=m.cond_hl))
            ][1:]
    if '+cols' in what or '=cols' in what:
        cnts += [0
            ,('lvls',d(cols=m.cols))
            ][1:]
    if '+lvls' in what or '=lvls' in what:
        cnts += [0
            ,('lvls',d(cols=m.cols, items=m.itms))
            ][1:]
    tofi_en = not m.how.get('only_for_ul', not ens['tofi']) # Forbid to switch fo File ops
    if '+cur' in what:
        cnts += [0
            ,('ed_s',d(cap=ed_s_c ,hint=m.cur_op ))
           # ,('eded',d(vis=vis['eded'] ,sto=ens['eded'] ,color=bcl['eded'] ))
           # ,('eded',d(vis=vis['eded'],ex0=not ens['eded'],sto=ens['eded'] ,color=bcl['eded'] ))
           # ,('eded',d(vis=vis['eded'],en=ens['eded'] ,color=bcl['eded'] ))
            ,('eded',d(vis=vis['eded'],en=ens['eded'] ))
            ,('edcl',d(vis=vis['edcl'] ,color=bcl['edcl'] ))
            ,('edcb',d(vis=vis['edcb'] ,items=its['edcb'] ))
            ,('edrf',d(vis=vis['edrf'] ))
            ,('edrt',d(vis=vis['edrt'] ))
            ,('brow',d(vis=vis['brow'] ))
            ,('toop',d(vis=vis['toop'] ))
            ,('opjs',d(vis=vis['opjs'] ))
            ,('dfv_',d( hint=m.cur_op ))
            ,('dfvl',d( ))
           # ,('dfvl',d( en=ens['dfvl'] ,color=bcl['dfvl'] ))
            ,('dfcl',d(vis=vis['dfcl'] ,color=bcl['dfcl'] ))
            ,('setd',d( en=ens['setd'] ))
            ,('tofi',d( en=tofi_en ))
            ][1:]
    if what and cnts:
        # Part info
        return cnts
    # Full dlg controls info #NOTE: cnts
    edit_h = get_gui_height('edit')
    cmnt_t = m.dlg_h-m.h_cmnt-5-M.STBR_H
    tofi_c = m.ed.get_prop(app.PROP_TAB_TITLE)
    co_tp = 'ed' if m.live_fltr else 'cb'
    cnts = [0 #
        # Hidden buttons (hotkey holders, placed off-screen at t=-99)
        ,('flt-',d(tp='bt' ,cap='&l' ,sto=False ,t=-99,l=0,w=44)) # &l
        ,('fltr',d(tp='bt' ,cap='' ,sto=False ,def_bt='1' ,t=-99,l=0,w=44)) # Enter
        ,('srt0',d(tp='bt' ,cap='&1' ,sto=False ,t=-99,l=0,w=44)) # &1
        ,('srt1',d(tp='bt' ,cap='&2' ,sto=False ,t=-99,l=0,w=44)) # &2
        ,('srt2',d(tp='bt' ,cap='&3' ,sto=False ,t=-99,l=0,w=44)) # &3
        ,('srt3',d(tp='bt' ,cap='&4' ,sto=False ,t=-99,l=0,w=44)) # &4
        ,('srt4',d(tp='bt' ,cap='&5' ,sto=False ,t=-99,l=0,w=44)) # &5
        ,('srt5',d(tp='bt' ,cap='&6' ,sto=False ,t=-99,l=0,w=44)) # &6
        ,('srt6',d(tp='bt' ,cap='&7' ,sto=False ,t=-99,l=0,w=44)) # &7
        ,('srt-',d(tp='bt' ,cap='&9' ,sto=False ,t=-99,l=0,w=44)) # &9
        ,('cws-',d(tp='bt' ,cap='&W' ,sto=False ,t=-99,l=0,w=44)) # &w
        ,('cpnm',d(tp='bt' ,cap='&C' ,sto=False ,t=-99,l=0,w=44)) # &c
        ,('erpt',d(tp='bt' ,cap='&O' ,sto=False ,t=-99,l=0,w=44)) # &o
        ,('apnw',d(tp='bt' ,cap='&Y' ,sto=False ,t=-99,l=0,w=44)) # &y
        ,('help',d(tp='bt' ,cap='&H' ,sto=False ,t=-99,l=0,w=44)) # &h
        # Top-panel
        ,('ptop',d(tp='pn' ,h= 270 ,w=m.dlg_w ,ali=ALI_CL
                  ,h_min=270 ))
        # Menu
        ,('menu',d(tp='bt' ,tid='cond' ,l=-40-5,w= 40 ,p='ptop' ,cap='&=' ,a='LR' )) # &=
        # Filter
        ,('chps',d(tp='bt' ,tid='cond' ,l=-270 ,r=-180 ,p='ptop' ,cap=_('+&Section…') ,hint=M.CHPS_H ,a='LR' )) # &s
        ,('flt_',d(tp='lb' ,tid='cond' ,l= 5 ,w= 70 ,p='ptop' ,cap='>'+M.FILTER_C+':' ,hint=M.FLTR_H )) # &f
        ,('cond',d(tp=co_tp,t= 5 ,l= 78 ,r=-270 ,p='ptop' ,items=m.cond_hl ,a='lR' )) #
       #,('cond',d(tp='cb' ,t= 5 ,l= 78 ,r=-270 ,p='ptop' ,items=m.cond_hl ,a='lR' )) #
        # Table of keys+values
        ,('lvls',d(tp='lvw',t= 35,h=160,l= 5 ,r= -5 ,p='ptop' ,items=m.itms,cols=m.cols ,grid='1' ,a='tBlR' )) #
        # Editors for value
        ,('ed_s',d(tp='lb' ,t=210 ,l= 5 ,w= 70 ,p='ptop' ,cap=ed_s_c ,hint=m.cur_op ,a='TB' )) # &e
        ,('eded',d(tp='ed' ,tid='ed_s' ,l= 78 ,r=-270 ,p='ptop' ,vis=vis['eded'],ex0=not ens['eded'],a='TBlR' )) #
       #,('eded',d(tp='ed' ,tid='ed_s' ,l= 78 ,r=-270 ,p='ptop' ,vis=vis['eded'],en=ens['eded'] ,a='TBlR' )) #
        ,('edcl',d(tp='clr',t=210-2 ,l= 210 ,r=-271 ,p='ptop' ,h=edit_h-4 ,vis=vis['edcl'],border=True ,a='TBlR' )) #
        ,('edcb',d(tp='cbr',tid='ed_s' ,l= 78 ,r=-270 ,p='ptop' ,items=its['edcb'] ,vis=vis['edcb'] ,a='TBlR' )) #
        ,('edrf',d(tp='ch' ,tid='ed_s' ,l= 78 ,w= 60 ,p='ptop' ,cap=_('f&alse') ,vis=vis['edrf'] ,a='TB' )) # &a
        ,('edrt',d(tp='ch' ,tid='ed_s' ,l= 140 ,w= 60 ,p='ptop' ,cap=_('t&rue') ,vis=vis['edrt'] ,a='TB' )) # &r
        ,('brow',d(tp='bt' ,tid='ed_s' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('&...') ,vis=vis['brow'] ,a='TBLR' )) # &.
        ,('toop',d(tp='bt' ,tid='ed_s' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('&GoTo') ,vis=vis['toop'],hint=M.TOOP_H ,a='TBLR' )) # &g
        ,('opjs',d(tp='bt' ,tid='ed_s' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('E&dit') ,vis=vis['opjs'],hint=M.OPME_H ,a='TBLR' )) # &d
        # View def-value
        ,('dfv_',d(tp='lb' ,tid='dfvl' ,l= 5 ,w= 70 ,p='ptop' ,cap=_('>Defa&ult:') ,hint=m.cur_op ,a='TB' )) # &u
       #,('dfvl',d(tp='ed' ,t=235 ,l= 78 ,r=-270 ,p='ptop' ,en=False ,sto=False ,a='TBlR' )) #
        ,('dfvl',d(tp='ed' ,t=235 ,l= 78 ,r=-270 ,p='ptop' ,ex0=True ,sto=False ,a='TBlR' )) #
       #,('dfvl',d(tp='ed' ,t=235 ,l= 78 ,r=-270 ,p='ptop' ,ro_mono_brd='1,0,1' ,sto=False ,a='TBlR' )) #
        ,('dfcl',d(tp='clr',t=235+1 ,l= 210 ,r=-271 ,p='ptop' ,h=edit_h-4 ,vis=vis['dfcl'],border=True ,a='TBlR' )) #
        ,('setd',d(tp='bt' ,tid='dfvl' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('Rese&t') ,en=ens['setd'] ,a='TBLR' )) # &t
        # For lexer/file
       #,('to__',d(tp='lb' ,tid='ed_s' ,l=-170 ,w= 30 ,p='ptop' ,cap=_('>For:') ,a='TBLR' )) #
        ,('to__',d(tp='lb' ,tid='ed_s' ,l=-165 ,w= 30 ,p='ptop' ,cap=_('For:') ,a='TBLR' )) #
        ,('tolx',d(tp='ch' ,tid='ed_s' ,l=-140 ,w= 70 ,p='ptop' ,cap=_('Le&xer') ,a='TBLR' )) # &x
        ,('tofi',d(tp='ch' ,tid='ed_s' ,l=- 90 ,w= 70 ,p='ptop' ,cap=_('F&ile') ,hint=tofi_c ,en=tofi_en ,a='TBLR' )) # &i
        ,('lexr',d(tp='cbr',tid='dfvl' ,l=-165 ,w= 160 ,p='ptop' ,items=m.lexr_w_l ,a='TBLR' ))
        # Comment
        ,('cmsp',d(tp='sp' ,y=cmnt_t-5 ,ali=ALI_BT,sp_lr=5 ))
        ,('cmnt',d(tp='me' ,t=cmnt_t ,h= m.h_cmnt
                  ,h_min=M.CMNT_MHT ,ali=ALI_BT,sp_lrb=5 ,ro_mono_brd='1,1,1' ))
        ,('stbr',d(tp='sb' ,y=-M.STBR_H
                  ,h= M.STBR_H ,ali=ALI_BT ))
        ][1:]
    # macOS: Alt+digit hotkey buttons conflict - drop the sort buttons
    if 'mac'==get_desktop_environment():
        cnts = [(cid,cnt) for cid,cnt in cnts if cnt.get('cap', '')[:3]!='srt']
    cnts = odict(cnts)
    if m.how.get('hide_fil', False):
        for cid in ('tofi',):
            cnts[cid]['vis'] = False
    if m.how.get('hide_lex_fil', False):
        for cid in ('to__', 'tolx', 'lexr', 'tofi'):
            cnts[cid]['vis'] = False
    # Negative l/r/y were offsets from right/bottom - convert to absolute
    for cnt in cnts.values():
        if 'l' in cnt: cnt['l'] = m.dlg_w+cnt['l'] if cnt['l']<0 else cnt['l']
        if 'r' in cnt: cnt['r'] = m.dlg_w+cnt['r'] if cnt['r']<0 else cnt['r']
        if 'y' in cnt: cnt['y'] = m.dlg_h+cnt['y'] if cnt['y']<0 else cnt['y']
    # Wire event handlers
    cnts['menu']['call'] = m.do_menu
    cnts['chps']['call'] = m.do_menu
    cnts['cpnm']['call'] = m.do_menu
    cnts['erpt']['call'] = m.do_menu
    cnts['apnw']['call'] = m.do_menu
    cnts['flt-']['call'] = m.do_fltr
    cnts['fltr']['call'] = m.do_fltr
    if m.live_fltr:
        cnts['cond']['call'] = m.do_fltr
    cnts['lexr']['call'] = m.do_lxfi
    cnts['tolx']['call'] = m.do_lxfi
    cnts['tofi']['call'] = m.do_lxfi
    cnts['lvls']['call'] = m.do_sele
    cnts['lvls']['on_click_header'] = m.do_sort
    cnts['srt0']['call'] = m.do_sort
    cnts['srt1']['call'] = m.do_sort
    cnts['srt2']['call'] = m.do_sort
    cnts['srt3']['call'] = m.do_sort
    cnts['srt4']['call'] = m.do_sort
    cnts['srt5']['call'] = m.do_sort
    cnts['srt6']['call'] = m.do_sort
    cnts['srt-']['call'] = m.do_sort
    cnts['cmsp']['call'] = m.do_cust
    cnts['cws-']['call'] = m.do_cust
    cnts['lvls']['on_click_dbl'] = m.do_dbcl #lambda idd,idc,data:print('on dbl d=', data)
    cnts['setd']['call'] = m.do_setv
    cnts['edcb']['call'] = m.do_setv
    cnts['edrf']['call'] = m.do_setv
    cnts['edrt']['call'] = m.do_setv
    cnts['brow']['call'] = m.do_setv
    cnts['toop']['call'] = m.do_setv
    cnts['opjs']['call'] = m.do_setv
    cnts['help']['call'] = m.do_help
    return cnts
    #def get_cnts
def get_vals(self, what=''):
    """Collect current control values for DlgAgent.

    what='' returns the full value set; 'lvls', 'lvls-cur', 'cur' and
    '+'-combinations ('+lvls', '+cur', '+inlxfi') return the matching subset.
    """
    M,m = self.__class__,self
    m.cur_in = m._prep_opt('key2ind')
    if not what or 'cur' in what:
        # Attributes of the currently selected option are needed
        vis,ens,vas,its,bcl = m._prep_opt()
        cur_d = dict(eded=vas['eded']
                    ,edcb=vas['edcb']
                    ,edrf=vas['edrf']
                    ,edrt=vas['edrt']
                    ,dfvl=vas['dfvl']
                    ,cmnt=vas['cmnt'])
    if not what:
        # Everything: filter, table selection, value editors, targets, lexer
        rsp = dict(cond=m.cond_s, lvls=m.cur_in)
        rsp.update(cur_d)
        rsp['tolx'] = m.for_ulf=='l'
        rsp['tofi'] = m.for_ulf=='f'
        rsp['lexr'] = m.lexr_l.index(m.lexr) if m.lexr in m.lexr_l else -1
        return rsp
    if '+' in what:
        rsp = dict()
        if '+lvls' in what:
            rsp['lvls'] = m.cur_in
        if '+cur' in what:
            rsp.update(cur_d)
        if '+inlxfi' in what:
            rsp['tolx'] = m.for_ulf=='l'
            rsp['tofi'] = m.for_ulf=='f'
        pass; #LOG and log('rsp={}',(rsp))
        return rsp
    if what=='lvls':
        return dict(lvls=m.cur_in)
    if what=='lvls-cur':
        rsp = dict(lvls=m.cur_in)
        rsp.update(cur_d)
        return rsp
    if what=='cur':
        return dict(cur_d)
    #def get_vals
def do_resize(self, ag):
    """Handle dialog resize: distribute spare listview width to the
    Default/User value columns (indices 3 and 4)."""
    M,m = self.__class__,self
    m.stbr_act(M.STBR_MSG, '')
    form_w = ag.fattr('w')
    list_w = ag.cattr('lvls', 'w')
    pass; #LOG and log('f_w,l_w={}',(form_w,list_w))
    if form_w < m.dlg_min_w:
        return []                               # fake event
    m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
    if form_w == m.dlg_min_w and m.col_ws != M.COL_MWS:
        # Shrunk to minimum - fall back to default widths
        return m.do_cust('cws-', ag)
    used_w = sum(m.col_ws)
    if used_w >= (list_w - M.COL_N - M.SCROLL_W):
        return []                               # decrease dlg - need user choice
    # Auto increase widths of def-val and user-val cols
    extra = (list_w - M.COL_N - M.SCROLL_W - used_w) // 2
    pass; #LOG and log('extra={}',(extra))
    m.col_ws[3] += extra
    m.col_ws[4] += extra
    return d(ctrls=m.get_cnts('+cols'))
    #def do_resize
def do_cust(self, aid, ag, data=''):
    """ Customization actions:
        aid='cmsp'  comment splitter moved
        aid='cws-'  reset listview columns to default widths
        aid='vali'  run vertical-alignment adjustment dialog
        aid='rslt'  restore saved layout (data = layout dict)
        aid='svlt'  fill data (layout dict) with current dlg/ctrl sizes
    """
    M,m = self.__class__,self
    m.stbr_act(M.STBR_MSG, '')
    pass; #LOG and log('aid={}',(aid))
    if False:pass
    elif aid=='cmsp':
        # Splitter moved
        sp_y = ag.cattr('cmsp', 'y')
        return []
        ##??
    elif aid=='cws-':
        # Set def col widths
        m.col_ws = M.COL_MWS[:]
        m.stores.pop(m.subset+'col_ws', None)
        return d(ctrls=m.get_cnts('+cols'))
    elif aid=='vali':
        if dlg_valign_consts():
            return d(ctrls=m.get_cnts())
        return []
    elif aid=='rslt':
        # Restore dlg/ctrls sizes
        fpr = ag.fattrs()
        layout = data
        m.col_ws = layout.get('col_ws', m.col_ws)
        cmnt_h = layout.get('cmnt_h', ag.cattr('cmnt', 'h'))
        dlg_h = layout.get('dlg_h' , fpr['h'])
        dlg_w = layout.get('dlg_w' , fpr['w'])
        return d(ctrls=
                    m.get_cnts('+cols')+
                    [('cmnt', d(h=cmnt_h))
                    ,('stbr', d(y=dlg_h)) # Hack to push it at bottom (by Alex)
                    ],form=d(
                     h=dlg_h
                    ,w=dlg_w
                    ))
    elif aid=='svlt':
        # Save dlg/ctrls sizes (mutates caller's `data` dict in place)
        m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
        layout = data
        fpr = ag.fattrs()
        layout['dlg_w'] = fpr['w']
        layout['dlg_h'] = fpr['h']
        layout['cmnt_h']= ag.cattr('cmnt', 'h')
        layout['col_ws']= m.col_ws
    #def do_cust
def do_menu(self, aid, ag, data=''):
    """ Show popup menus and execute their commands.

        aid='chps'  chapter tree menu (built from m.chp_tree)
        aid='menu'  main "=" menu (layouts, sorting, locate, report, help, ...)
        aid in ('apnw','cpnm','erpt')  direct hotkey actions routed to menu handler
        Returning None from the handler closes the dialog.
    """
    pass; #LOG and log('aid={}',(aid))
    M,m = self.__class__,self
    m.stbr_act(M.STBR_MSG, '')
    scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
    if scam=='c' and aid=='menu':
        # Ctrl+menu: hidden entry to the vertical-alignment tool
        return m.do_cust('vali', ag)
    def wnen_menu(ag, tag):
        # Menu command dispatcher; tag encodes the action (and sometimes an index)
        pass; #LOG and log('tag={}',(tag))
        if False:pass
        elif tag[:3]=='ch:':
            return m.do_fltr('chps', ag, tag[3:])
        elif tag=='srt-':
            return m.do_sort('', ag, -1)
        elif tag[:3]=='srt':
            return m.do_sort('', ag, int(tag[3]))
        elif tag=='cws-':
            return m.do_cust(tag, ag)
        elif tag=='vali':
            return m.do_cust(tag, ag)
       # elif tag=='lubk':
       #     if app.ID_OK != app.msg_box(
       #                     _('Restore user settings from backup copy?')
       #                 , app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
       #     return m.do_file('restore-user')
       # elif tag=='llbk':
       #     if app.ID_OK != app.msg_box(
       #                     f(_('Restore lexer "{}" settings from backup copy?'), m.lexr)
       #                 , app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
       #     return m.do_file('restore-lexr')
       # elif tag=='dobk':
       #     m.stores[m.subset+'bk_sets'] = m.bk_sets = not m.bk_sets
       #     return []
       # elif tag=='dfns':
       #     m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
       #     new_file = app.dlg_file(True, m.defn_path.name, str(m.defn_path.parent), 'JSONs|*.json')
       #     if not new_file or not os.path.isfile(new_file): return []
       #     return m.do_file('set-dfns', new_file)
        elif tag=='full':
            # Toggle "show all options" and reload values
            m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
            m.all_ops = not m.all_ops
            m.opts_full = load_vals(m.opts_defn
                                   ,lexr_json='lexer '+m.lexr+'.json'
                                   ,user_json=m.how.get('stor_json', 'user.json')
                                   , ed_=m.ed, full=m.all_ops)
            m.cur_op = m.cur_op if m.cur_op in m.opts_full else ''
            m.do_file('build-chp-tree')
            m.stbr_act(M.STBR_ALL, len(m.opts_full))
            return d(ctrls=odict(m.get_cnts('+lvls +cur')))
        if tag=='apex':
            m.apply_one = not m.apply_one
            m.stores[m.subset+'apply_one'] = m.apply_one
        if tag=='apnw':
            ed.cmd(cmds.cmd_OpsReloadAndApply)
        if tag=='aufi':
            m.auto4file = not m.auto4file
            m.stores[m.subset+'auto4file'] = m.auto4file
        if tag=='lifl':
            # Toggle live filter - requires dialog restart
            m.stores[m.subset+'live_fltr'] = not m.stores.get(m.subset+'live_fltr' , False)
            M.restart = True
            M.restart_cond = ag.cval('cond')
            return None # Close dlg
        elif tag=='cpnm':
            app.app_proc(app.PROC_SET_CLIP, m.cur_op)
        elif tag=='erpt':
            body = '\n'.join(m.chng_rpt)
            dlg_wrapper(_('Сhange log') , 500+10 ,400+10,
                [ dict(cid='body',tp='me' ,l=5,w=500 ,t=5,h=400, ro_mono_brd='1,0,0')]
                , dict(body=body), focus_cid='body')
        elif tag=='locv':
           # m.do_file('locate-opt') # while wait core fix
            if m.do_file('goto-opt'): return None # need close dlg
        elif tag=='locd':
           # m.do_file('locate-def') # while wait core fix
            if m.do_file('goto-def'): return None # need close dlg
        elif tag[:4] in ('rslt', 'rmlt', 'svlt'):
            # Layout management: restore/remove/save named layouts
            layouts_l = m.stores.get(m.subset+'layouts', []) # [{nm:Nm, dlg_h:H, dlg_w:W, ...}]
            layouts_d = {lt['nm']:lt for lt in layouts_l}
            lt_i = int(tag[4:]) if tag[:4] in ('rslt', 'rmlt') else -1
            layout = layouts_l[lt_i] if lt_i>=0 else None
            if 0:pass
            elif tag[:4]=='rmlt':
                if app.ID_OK != app.msg_box(
                                f(_('Remove layout "{}"?'), layout['nm'])
                            , app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
                del layouts_l[lt_i]
            elif tag=='svlt':
                # Ask for a free name, confirm overwrite if it exists
                nm_tmpl = _('#{}')
                layout_nm = f(nm_tmpl
                             ,first_true(itertools.count(1+len(layouts_d))
                                        ,pred=lambda n:f(nm_tmpl, n) not in layouts_d)) # First free #N after len()
                while True:
                    pass; #LOG and log('layout_nm={!r}',(layout_nm))
                    layout_nm = app.dlg_input('Name to save current sizes of the dialog and controls', layout_nm)
                    if not layout_nm: return []
                    layout_nm = layout_nm.strip()
                    if not layout_nm: return []
                    if layout_nm in layouts_d and \
                       app.ID_OK != app.msg_box(
                                    f(_('Name "{}" already used. Overwrite?'), layout_nm)
                                , app.MB_OKCANCEL+app.MB_ICONQUESTION): continue
                    break
                layout = None
                if layout_nm in layouts_d:
                    layout = layouts_d[layout_nm] # Overwrite
                else:
                    layout = d(nm=layout_nm) # Create
                    layouts_l+=[layout]
                # Fill
                m.do_cust( 'svlt', ag, layout)
            elif tag[:4]=='rslt':
                return m.do_cust('rslt', ag, layout)
            # Save
            m.stores[m.subset+'layouts'] = layouts_l
            return []
        elif tag=='rprt':
            m.do_file('out-rprt')
        elif tag=='help':
            return m.do_help('', ag)
        return []
        #def wnen_menu
    pass; #LOG and log('',())
    if aid=='chps':
        def tree2menu(node, chp=''):
            # Recursively convert m.chp_tree node into menu item dicts
            mn_l = [ d( tag='ch:'+ node['path']
                      , cap=f('{} ({})', chp, len(node['ops']))
                      , cmd=wnen_menu)
                    ,d( cap='-')
                    ] if chp else []
            for chp,kid in node['kids'].items():
                mn_l +=([d( cap=f('{} ({})', chp, len(kid['ops']))
                          , sub=tree2menu(kid, chp))
                        ]
                        if 'kids' in kid else
                        [d( tag='ch:'+ kid['path']
                          , cap=f('{} ({})', chp, len(kid['ops']))
                          , cmd=wnen_menu)
                        ]
                       )
            return mn_l
            #def tree2menu
        mn_its = tree2menu(m.chp_tree)
        ag.show_menu('chps', mn_its)
    if aid=='apnw': return wnen_menu(ag, aid)
    if aid=='cpnm': return wnen_menu(ag, aid)
    if aid=='erpt': return wnen_menu(ag, aid)
    if aid=='menu':
        locv_c = f(M.LOCV_C, m.cur_op)
        locd_c = f(M.LOCD_C, m.cur_op)
        lts_l = m.stores.get(m.subset+'layouts', []) # [{nm:Nm, dlg_h:H, dlg_w:W, ...}]
        full_en = not m.how.get('only_with_def', False) # Forbid to switch fo User+Lexer ops
        live_fltr=m.stores.get(m.subset+'live_fltr' , False)
        pass; #lts_l = [d(nm='Nm1'), d(nm='Nm2')]
        mn_its = \
        [ d(tag='cpnm' ,cap=_('&Copy option name') ,key='Alt+C'
        ),d( cap='-'
        ),d( cap=_('&Layout') ,sub=
            [ d(tag='svlt' ,cap=_('&Save current layout...')
            ),d( cap='-'
            )]+ (
            [ d(tag='rslt'+str(nlt) ,cap=f(_('Restore layout "{}"'), lt['nm'])) for nlt, lt in enumerate(lts_l)
            ]+
            [ d( cap=_('&Forget layout'),sub=
                [ d(tag='rmlt'+str(nlt) ,cap=f(_('Forget layout "{}"...'), lt['nm'])) for nlt, lt in enumerate(lts_l)
                ])
            ] if lts_l else []) +
            [ d( cap='-'
            ),d(tag='vali' ,cap=_('Adjust vertical alignments...')
            ),d(tag='cws-' ,cap=_('Set default columns &widths') ,key='Alt+W'
            )]
        ),d( cap=_('&Table') ,sub=
            [ d(tag='srt'+str(cn) ,cap=f(_('Sort by column "{}"'), cs.split()[0])
                                  ,ch=sorts_on(m.sorts, cn)
                                  ,key='Alt+'+str(1+cn))
                for cn, cs in enumerate(M.COL_NMS)
            ]+
            [ d( cap='-'
            ),d(tag='srt-' ,cap=_('Reset sorting') ,key='Alt+9'
            )]
        ),d( cap=_('M&ore') ,sub=
            [ d(tag='locv' ,cap=locv_c ,en=bool(m.cur_op)
            ),d(tag='locd' ,cap=locd_c ,en=bool(m.cur_op)
            ),d( cap='-'
            ),d(tag='erpt' ,cap=_('Show rep&ort of changes...') ,key='Alt+O'
            ),d(tag='apex' ,cap=_('Apply changes on exit') ,ch=m.apply_one
            ),d(tag='apnw' ,cap=_('Appl&y changes now') ,en=m.apply_need ,key='Alt+Y'
            ),d(tag='aufi' ,cap=_('Auto-update FILE options') ,ch=m.auto4file
            ),d( cap='-'
            ),d(tag='lifl' ,cap=M.LIFL_C ,ch=live_fltr
            ),d( cap='-'
            ),d(tag='full' ,cap=M.FULL_C ,ch=m.all_ops ,en=full_en
            )]
        ),d( cap='-'
        ),d( tag='rprt' ,cap=_('Create HTML &report')
        ),d( cap='-'
        ),d( tag='help' ,cap=_('&Help...') ,key='Alt+H'
        )]
        pass; #LOG and log('mn_its=¶{}',pf(mn_its))
        def add_cmd(its):
            # Attach the dispatcher to every leaf item recursively
            for it in its:
                if 'sub' in it: add_cmd(it['sub'])
                else: it['cmd']=wnen_menu
        add_cmd(mn_its)
        ag.show_menu(aid, mn_its)
    return []
    #def do_menu
    def do_fltr(self, aid, ag, data=''):
        """Handle filter-field events and (re)apply the options filter.

        aid: 'cond' — filter text changed (live-filter mode),
             'fltr' — Enter pressed in the filter field,
             'flt-' — clear-filter button,
             'chps' — a chapter was picked from the chapters menu (data=path).
        Returns a dict of control/value/focus updates for the dialog agent.
        """
        M,m = self.__class__,self
        m.stbr_act(M.STBR_MSG, '')
        # Remember current column widths before the table is rebuilt
        m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
        fid = ag.fattr('fid')
        pass; #LOG and log('aid,fid={}',(aid,fid))
        if aid=='fltr' and fid in ('dfvl', 'eded', 'edrf', 'edrt'):
            # Imitate default button
            return m.do_setv('setd' if fid in ('dfvl',) else
                             'setv' if fid in ('eded',) else
                             fid if fid in ('edrf', 'edrt') else
                             ''
                            , ag)
        if aid=='cond':
            pass; #LOG and log('ag.cval(cond)={}',(ag.cval('cond')))
            m.cond_s = ag.cval('cond')
            fid = '' if m.live_fltr else 'lvls'
        if aid=='fltr':
            m.cond_s = ag.cval('cond')
            # History is kept only in non-live mode
            m.cond_hl = add_to_history(m.cond_s, m.cond_hl) if m.cond_s and not m.live_fltr else m.cond_hl
            fid = 'lvls'
        if aid=='flt-':
            m.cond_s = ''
            fid = 'cond'
        if aid=='chps':
            # Append selected chapter as filter value
            scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
            path = '@'+data
            if path not in m.cond_s:
                if scam!='c':
                    m.cond_s= re.sub(r'@([\w/]*)', '', m.cond_s).strip() # del old
                m.cond_s = (m.cond_s+' '+path).strip() # add new
                m.cond_hl = add_to_history(m.cond_s, m.cond_hl) if not m.live_fltr else m.cond_hl
            fid = 'cond'
        # Select old/new op
        m.cur_op= m._prep_opt('ind2key')
        ctrls = m.get_cnts('+lvls')
        m.cur_in= m._prep_opt('key2ind')
        if m.cur_in<0 and m.SKWULFs:
            # Sel top if old hidden
            m.cur_in= 0
            m.cur_op= m._prep_opt('ind2key', ind=m.cur_in)
        return d(ctrls=m.get_cnts('+cond =lvls +cur')
                ,vals =m.get_vals()
                ,form =d(fid=fid)
                )
    #def do_menu
    def do_sort(self, aid, ag, col=-1):
        """Sort the options table by a column, or reset sorting.

        aid: menu tag 'srt<N>' / 'srt-' (reset); col: clicked column index.
        Ctrl+click (scam=='c') extends sorting to several columns
        (the multi-column rules live in sorts_turn).
        """
        scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
        pass; #LOG and log('col,scam={}',(col,scam))
        pass; #return []
        M,m = self.__class__,self
        m.stbr_act(M.STBR_MSG, '')
        m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
        if aid=='srt-' or col==-1:
            # Back to the default (unsorted) state
            m.sorts = sorts_dflt(len(M.COL_NMS))
        else:
            # Column may come either from the menu tag or from a header click
            col = int(aid[3]) if aid[:3]=='srt' else col
            pass; #LOG and log('?? m.sorts={}',(m.sorts))
            m.sorts = sorts_turn(m.sorts, col, scam)
            pass; #LOG and log('ok m.sorts={}',(m.sorts))
        old_in = m._prep_opt('key2ind')
        ctrls = m.get_cnts('+lvls')
        if old_in==0:
            # Set top if old was top
            m.cur_in= 0
            m.cur_op= m._prep_opt('ind2key', ind=m.cur_in)
        else:
            # Save old op
            m.cur_in= m._prep_opt('key2ind')
        return d(ctrls=m.get_cnts('=lvls +cur')
                ,vals =m.get_vals()
                )
    #def do_sort
    def do_sele(self, aid, ag, data=''):
        """Selection changed in the table: remember the newly selected option
        and refresh the value-editing controls for it."""
        M,m = self.__class__,self
        m.stbr_act(M.STBR_MSG, '')
        pass; #LOG and log('data,m.cur_op,m.cur_in={}',(data,m.cur_op,m.cur_in))
        m.cur_op= m._prep_opt('ind2key')
        pass; #LOG and log('m.cur_op,m.cur_in={}',(m.cur_op,m.cur_in))
        pass; #log('###m.get_cnts(+cur)={}',(m.get_cnts('+cur')))
        return d(ctrls=odict(m.get_cnts('+cur'))
                ,vals = m.get_vals('cur')
                )
    #def do_sele
    def do_lxfi(self, aid, ag, data=''):
        """React to the "For Lexer"/"For File" checkboxes and the lexer combo.

        aid: 'tolx'/'tofi' — switch the editing target level (user/lexer/file),
             'lexr' — another lexer was chosen; reloads table data for it.
        """
        M,m = self.__class__,self
        m.stbr_act(M.STBR_MSG, '')
        pass; #LOG and log('aid={}',(aid))
        m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
        if False:pass
        elif aid in ('tolx', 'tofi'):
            # Changed "For Lexer/File"
            m.for_ulf = 'l' if aid=='tolx' and ag.cval('tolx') else \
                        'f' if aid=='tofi' and ag.cval('tofi') else \
                        'u'
            # Focus the lexer combo if a lexer level is chosen but the current
            # lexer is not in the known list; otherwise focus the value editor
            fid = 'lexr' \
                    if m.for_ulf=='l' and m.lexr not in m.lexr_l else \
                  m._prep_opt('fid4ed')
            return d(ctrls=m.get_cnts('+cur')
                    ,vals =m.get_vals('+cur+inlxfi')
                    ,form =d(fid=fid)
                    )
        elif aid=='lexr':
            # Change current lexer
            lexr_n = ag.cval('lexr')
            m.lexr = m.lexr_l[lexr_n] if lexr_n>=0 else ''
            m.cur_op= m._prep_opt('ind2key')
            m.do_file('load-data')
            ctrls = m.get_cnts('+lvls')
            m.cur_in= m._prep_opt('key2ind')
            if m.cur_in<0 and m.SKWULFs:
                # Sel top if old hidden
                m.cur_in= 0
                m.cur_op= m._prep_opt('ind2key', ind=m.cur_in)
            elif m.cur_in<0:
                m.cur_op= ''
            return d(ctrls=m.get_cnts('=lvls +cur')
                    ,vals =m.get_vals()#'+lvls +cur')
                    )
    #def do_lxfi
    def do_dbcl(self, aid, ag, data=''):
        """Double-click on a table cell: depending on the clicked column,
        switch the editing level (user/lexer/file) and focus the value editor,
        or focus the reset-to-default button.  data is the click point (x,y)."""
        M,m = self.__class__,self
        m.stbr_act(M.STBR_MSG, '')
        pass; #LOG and log('data,m.cur_op,m.cur_in={}',(data,m.cur_op,m.cur_in))
        m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
        if aid!='lvls': return []
        # Dbl-click on lvls cell
        if sum(m.col_ws) > ag.cattr('lvls', 'w') - M.SCROLL_W:
            # Has hor-scrolling
            pass; #LOG and log('skip as h-scroll',())
            return []
        op_r = ag.cval('lvls')
        # Find the column under the click x: first column whose cumulative
        # width exceeds data[0]
        op_c = next(filter( # next(filter())==first_true
                    lambda col_n_sw: col_n_sw[1]>data[0] # > x from click (x,y)
                  , enumerate(accumulate(m.col_ws)) # (n_col, sum(col<=n))
                  ), [-1, -1
                  ])[0]
        pass; #LOG and log('op_r,op_c,m.cur_op,m.cur_in={}',(op_r,op_c,m.cur_op,m.cur_in))
        pass; #LOG and log('op_r,op_c={}',(op_r,op_c))
        if False:pass
        elif op_c not in (M.COL_DEF,M.COL_USR,M.COL_LXR,M.COL_FIL):
            return []
        elif -1==op_r:
            pass; #LOG and log('skip as no opt',())
            return []
        elif -1==op_c:
            # NOTE(review): unreachable — op_c==-1 is already rejected by the
            # 'not in' test above
            pass; #LOG and log('skip as miss col',())
            return []
        elif M.COL_DEF==op_c:
            return d(form =d(fid='setd'))
        elif M.COL_USR==op_c and m.for_ulf!='u':
            # Switch to user vals
            m.for_ulf = 'u'
        elif M.COL_LXR==op_c and m.for_ulf!='l':
            # Switch to lexer vals
            m.for_ulf = 'l'
        elif M.COL_FIL==op_c and m.for_ulf!='f':
            # Switch to file vals
            m.for_ulf = 'f'
        else:
            return []
        pass; LOG and log('op_r,op_c,m.for_ulf={}',(op_r,op_c,m.for_ulf))
        return d(ctrls=m.get_cnts('+cur')
                ,vals =m.get_vals('+cur+inlxfi')
                ,form =d(fid=m._prep_opt('fid4ed'))
                )
    #def do_dbcl
def do_setv(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('aid,m.cur_op={}',(aid,m.cur_op))
if not m.cur_op: return []
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
if aid=='toop':
# m.do_file('locate-opt') # while wait core fix
if m.do_file('goto-opt'): return None # need close dlg
return []
trg = 'lexer '+m.lexr+'.json' if m.for_ulf=='l' else 'user.join'
key4v = m.for_ulf+'val'
op = m.cur_op
oi = m.opts_full[op]
frm = oi['frm']
# if frm=='json':
# m.stbr_act(M.STBR_MSG, f(_('Edit {!r} to change value'), trg))
# return []
dval = oi.get( 'def')
uval = oi.get('uval')
lval = oi.get('lval')
fval = oi.get('fval')
ulfvl = oi.get(key4v ) #fval if m.for_ulf=='f' else lval if m.for_ulf=='l' else uval
jval = oi['jlvl'] if m.for_ulf=='l' else \
oi['juvl'] if m.for_ulf=='u' else \
oi['jfvl']
scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
# Get new value
newv = None
erpt_s = ''
if False:pass
elif aid=='setd' and \
m.for_ulf=='f' and \
op in apx.OPT2PROP:
# Remove from file - set over def/user/lex val
newv = oi.get('lval', oi.get('uval', oi.get('def')))
if newv==ulfvl:
m.stbr_act(M.STBR_MSG, _('No need changes'))
return []
erpt_s = 'reset-f'
m.ed.set_prop(apx.OPT2PROP[op], newv)
elif aid=='setd' and \
ulfvl is not None and \
m.for_ulf!='f':
# Remove from user/lexer
if scam!='c' and \
app.ID_OK != app.msg_box(f(_('Remove {} option'
'\n {} = {!r}'
'\n?'), 'LEXER' if m.for_ulf=='l' else 'USER', op, jval)
, app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
newv= None
elif aid=='brow' and frm in ('hotk', 'file', '#rgb', '#rgb-e'):
ulfvl_s = '' if ulfvl is None else ulfvl
m.stbr_act(M.STBR_MSG, f(_('Default value: "{}". Old value: "{}"'), dval, ulfvl_s))
if frm in ('#rgb', '#rgb-e'):
ulfvl_s = ulfvl_s if ulfvl_s else dval if frm=='#rgb' else '#fff'
newv = app.dlg_color(apx.html_color_to_int(ulfvl_s))
if newv is None: return []
newv = apx.int_to_html_color(newv)
else:
newv= (app.dlg_hotkey(op) if frm=='hotk' else
app.dlg_file(False, '', os.path.expanduser(ulfvl_s), '') if frm=='file' else None)
m.stbr_act(M.STBR_MSG, '')
if not newv: return []
elif aid=='opjs':
newv = edit_json_as_dict(op, ulfvl, dval, oi.get('cmt' , ''))
if newv is None: return []
elif aid=='setv': # Add/Set opt for user/lexer/file
# Enter from edit. Need parse some string
newv = m.ag.cval('eded')
try:
newv = int(newv) if frm=='int' else \
float(newv) if frm=='float' else \
newv
except Exception as ex:
app.msg_box(f(_('Incorrect value. It\'s needed in format: {}'), frm)
, app.MB_OK+app.MB_ICONWARNING)
return d(form=d(fid='eded'))
if frm=='#rgb' or frm=='#rgb-e' and newv: # Testing new val
try:
apx.html_color_to_int(newv)
except Exception as ex:
app.msg_box(f(_('Incorrect value. It\'s needed in format: {}'), '#RGB or #RRGGBB')
, app.MB_OK+app.MB_ICONWARNING)
return d(form=d(fid='eded'))
elif aid in ('edrf', 'edrt'): # Add/Set opt for user/lexer/file
newv = aid=='edrt'
newv = not newv if newv==ulfvl else newv
elif aid=='edcb': # Add/Set opt into user/lexer/file
pass; #LOG and log('oi={}',(oi))
vl_l = [k for k,v in oi.get('dct', [])] if 'dct' in oi else oi.get('lst', [])
pass; #LOG and log('vl_l={}',(vl_l))
pass; #LOG and log('m.ag.cval(edcb)={}',(m.ag.cval('edcb')))
newv = vl_l[m.ag.cval('edcb')]
pass; #LOG and log('newv={}',(newv))
# Use new value to change env
if newv is not None and newv==ulfvl:
m.stbr_act(M.STBR_MSG, _('No need changes'))
return []
if m.for_ulf=='f' and newv is not None and op in apx.OPT2PROP:
# Change for file
erpt_s = 'set-f'
ed.set_prop(apx.OPT2PROP[op], newv)
if m.for_ulf!='f':
# Change target file
pass; #LOG and log('?? do_erpt',())
erpt_s =('reset-u' if newv is None and m.for_ulf=='u' else
'reset-l' if newv is None and m.for_ulf=='l' else
'add-u' if ulfvl is None and m.for_ulf=='u' else
'add-l' if ulfvl is None and m.for_ulf=='l' else
'set-u' if m.for_ulf=='u' else
'set-l' if m.for_ulf=='l' else '')
pass; #LOG and log('?? set_opt',())
apx.set_opt(op
,newv
,apx.CONFIG_LEV_LEX if m.for_ulf=='l' else apx.CONFIG_LEV_USER
,ed_cfg =None
,lexer =m.lexr if m.for_ulf=='l' else None
,user_json=m.how.get('stor_json', 'user.json')
)
if not m.apply_one:
pass; #LOG and log('?? OpsReloadAndApply',())
ed.cmd(cmds.cmd_OpsReloadAndApply)
else:
m.apply_need = True
# Use new value to change dlg data
pass; #LOG and log('?? oi={}',(oi))
pass; #LOG and log('?? m.opts_full={}',pf(m.opts_full))
if False:pass
elif aid=='setd':
oi.pop(key4v, None) if m.for_ulf!='f' else 0
else:
pass; #LOG and log('key4v, newv={}',(key4v, newv))
oi[key4v] = newv
pass; #LOG and log('oi={}',(oi))
upd_cald_vals(m.opts_full)
pass; #LOG and log('oi={}',(oi))
jnewv = oi['jlvl'] if m.for_ulf=='l' else oi['juvl'] if m.for_ulf=='u' else oi['jfvl']
m.do_erpt(erpt_s, jnewv, jval)
pass; #LOG and log('ok oi={}',(oi))
pass; #LOG and log('ok m.opts_full={}',pf(m.opts_full))
pass; #LOG and log('?? get_cnts',())
if m.for_ulf!='f' and m.auto4file and op in apx.OPT2PROP:
# Change FILE to over
newv = oi.get('lval', oi.get('uval', oi.get('def')))
if newv!=oi.get('fval'):
erpt_s = 'reset-f'
m.ed.set_prop(apx.OPT2PROP[op], newv)
oi['fval'] = newv
jval = oi['jfvl']
upd_cald_vals(m.opts_full)
jnewv = oi['jfvl']
m.do_erpt('auset-f', jnewv, jval)
pass; #LOG and log('m.get_vals(lvls-cur)={}',(m.get_vals('lvls-cur')))
return d(ctrls=m.get_cnts('+lvls+cur')
,vals =m.get_vals('lvls-cur')
)
#def do_setv
    def do_erpt(self, what='', jnewv=None, joldv=None):
        """Append a human-readable line about option change *what* to the
        change report (m.chng_rpt) and echo it in the status bar.
        jnewv/joldv are the JSON-level new/old values."""
        pass; #LOG and log('what, newv={}',(what, newv))
        M,m = self.__class__,self
        if 0==len(m.chng_rpt):
            # First change in this session: stamp the start time
            rpt = f('Starting to change options at {:%Y-%m-%d %H:%M:%S}', datetime.datetime.now())
            m.chng_rpt += [rpt]
        oi = m.opts_full[m.cur_op]
        oldv= None
        # NOTE(review): oi and oldv are assigned but unused below
        rpt = ''
        if 0:pass
        elif what=='reset-f':
            rpt = f(_('Set FILE option to overridden value {!r}') ,jnewv)
        elif what=='set-f':
            rpt = f(_('Set FILE option to {!r}') ,jnewv)
        elif what=='auset-f':
            rpt = f(_('Auto-set FILE option to overridden value {!r}') ,jnewv)
        elif what=='reset-l':
            rpt = f(_('Remove LEXER {!r} option') ,m.lexr )
        elif what=='set-l':
            rpt = f(_('Set LEXER {!r} option to {!r}') ,m.lexr ,jnewv)
        elif what=='add-l':
            rpt = f(_('Add LEXER {!r} option {!r}') ,m.lexr ,jnewv)
        elif what=='reset-u':
            rpt = f(_('Remove USER option') )
        elif what=='set-u':
            rpt = f(_('Set USER option to {!r}') ,jnewv)
        elif what=='add-u':
            rpt = f(_('Add USER option {!r}') ,jnewv)
        else:
            return
        rpt = f('{} (from {!r})', rpt, joldv) \
                if what[:3]!='add' and joldv is not None else rpt
        # Normalize Python bools to JSON spelling
        rpt = rpt.replace('True', 'true').replace('False', 'false')
        rpt = m.cur_op + ': ' + rpt
        rpt = f('{}. ', len(m.chng_rpt)) + rpt
        # print(rpt)
        m.stbr_act(M.STBR_MSG, rpt + _(' [Alt+O - all changes]'))
        m.chng_rpt += [rpt]
    #def do_erpt
    def do_help(self, aid, ag, data=''):
        """Show a modal read-only Help dialog: filter syntax (M.FLTR_H) plus
        general UI tips, with captions substituted from the live controls."""
        M,m = self.__class__,self
        m.stbr_act(M.STBR_MSG, '')
        pass; #LOG and log('',())
        dlg_wrapper('Help'
        , 680+10, 500+10
        , [d(cid='body', tp='me', l=5, t=5, w=680, h=500, ro_mono_brd='1,1,0')]
        , d( body= #NOTE: help
            f(
            _( 'About "{fltr}"'
               '\r '
            )
            +M.FLTR_H+
            _('\r '
              '\rOther tips.'
              '\r • Use ENTER to filter table and to change or reset value.'
              '\r • Use double click on any cell in columns'
              '\r "{c_usr}"'
              '\r "{c_lxr}"'
              '\r "{c_fil}"'
              '\r to change "{in_lxr}" flag and to put focus on the value field.'
              '\r • Use double click on any cell in column'
              '\r "{c_def}"'
              '\r to put focus on "{reset}".'
              '\r • Clicking "{reset}" will ask for confirmation, for user/lexer options.'
              '\r Hold Ctrl key to skip this confirmation.'
              '\r • Click on a column header sorts data in the column.'
              '\r Alt+# (# is 1..8) sorts the N column (not on macOS).'
              '\r Alt+9 resets sorting (not on macOS).'
              '\r Click with Ctrl allows to sort by several columns.'
              '\r Clicking with Ctrl on already sorted column does 2-state loop (down, up).'
              '\r Clicking with Ctrl on already sorted column with maximal sorting index, '
              '\r does 3-state loop (down, up, off).'
              '\r • Use option "{lifl}" to see instant update of the list after'
              '\r each changing in the filter field'
              '\r (otherwise you need to press Enter after changing).'
              '\r With this option, no history of the filter is kept'
              '\r (filter combobox has empty dropdown list).'
              '\r • If current list line is scrolled out of view, '
              '\r you can still see the option name - in the tooltip'
              '\r of "User" (Lexer/File) label near the value field.'
              '\r • Tooltip shows file name (or tag name), when cursor hovers the checkbox "{tofi}".'
              '\r • Some plugins store their settings into user.json.'
              '\r So after a while, user.json contains options not present in default.json.'
              '\r To see all these keys, use option "{full}".'
              '\r • Values in table column "!"'
              '\r ! option is set in "user.json",'
              '\r !! option is set in "lexer NNN.json",'
              '\r !!! option is set for current file,'
              '\r L default value is from "settings_default/lexer NNN.json",'
              '\r + not CudaText standard option.'
            ) , c_usr=M.COL_NMS[M.COL_USR]
            , c_lxr=M.COL_NMS[M.COL_LXR]
            , c_fil=M.COL_NMS[M.COL_FIL].split()[0]
            , c_def=M.COL_NMS[M.COL_DEF]
            , fltr = ag.cattr('flt_', 'cap', live=False).replace('&', '').strip(':')
            , in_lxr=ag.cattr('tolx', 'cap', live=False).replace('&', '')
            , reset= ag.cattr('setd', 'cap', live=False).replace('&', '')
            , tofi = ag.cattr('tofi', 'cap', live=False).replace('&', '')
            , lifl = M.LIFL_C.replace('&', '')
            , full = M.FULL_C.replace('&', '')
            ))
            )
        return []
    #def do_help
restart = False
restart_cond= None
#class OptEdD
def edit_json_as_dict(op, uval, dval, cmnt4v):
    """ Allow user to edit JSON value

    op      option name (used in the dialog caption)
    uval    current (user) value — preloaded into the editor
    dval    default value — inserted by the "Set default" button
    cmnt4v  comment text shown in the read-only pane
    Returns the parsed new value, or None if the user cancelled.
    """
    pass; #log("op, uval, dval={}",(op, uval, dval))
    newv = None
    def acts(aid, ag, data=''):
        # Button handler: 'defv' insert default, 'undo' revert to uval,
        # 'test' validate only, 'okok' validate and accept (closes dialog).
        nonlocal newv
        if False:pass
        elif aid=='defv':
            return d(vals=d(meme=json.dumps(dval, indent=2)),fid='meme')
        elif aid=='undo':
            return d(vals=d(meme=json.dumps(uval, indent=2)),fid='meme')
        elif aid in ('test', 'okok'):
            mejs = ag.cval('meme')
            pass; #log("mejs={!r}",(mejs))
            try:
                jsvl = json.loads(mejs, object_pairs_hook=odict)
            except Exception as ex:
                # Show parse error with numbered source lines (spaces made
                # visible as '·') in the comment pane
                warn = str(ex) + c10 + (c10.join('{:>3}|{}'.format(n+1, s.replace(' ','·'))
                                                 for n,s in enumerate(mejs.split(c10))))
                return d(vals=d(cmnt=warn),fid='meme')
            # app.msg_box(str(ex)
            #            +c10+(c10.join('{:>3}|{}'.format(n+1, s.replace(' ','·'))
            #                           for n,s in enumerate(mejs.split(c10))))
            #            , app.MB_OK)
            # return d(fid='meme')
            if aid=='okok':
                newv = jsvl
                return None # Close
        return d(vals=d(cmnt=cmnt4v),fid='meme')
    #def acts
    DlgAgent(
        form =dict(cap = f(_('Edit JSON option ({})'), op)
                  ,resize = True
                  ,w = 510
                  ,h = 400
                  )
    , ctrls=[0
            ,('meme',d(tp='me' ,l= 5 ,w=500 ,t= 5 ,h=150 ,a='tBlR'))
            ,('cmnt',d(tp='me' ,l= 5 ,w=500 ,t=160 ,h=200 ,ro_mono_brd='1,1,1' ,a='TBlR'))
            ,('defv',d(tp='bt' ,l= 5 ,w=110 ,t=370 ,cap=_('Set &default') ,a='TB' ,call=acts ,en=(dval is not None)))
            ,('undo',d(tp='bt' ,l=120 ,w=110 ,t=370 ,cap=_('&Undo changes') ,a='TB' ,call=acts))
            ,('test',d(tp='bt' ,l=285 ,w= 70 ,t=370 ,cap=_('Chec&k') ,a='TBLR' ,call=acts))
            ,('cans',d(tp='bt' ,l=360 ,w= 70 ,t=370 ,cap=_('Cancel') ,a='TBLR' ,call=acts))
            ,('okok',d(tp='bt' ,l=435 ,w= 70 ,t=370 ,cap=_('OK') ,a='TBLR' ,call=acts ,def_bt=True))
            ][1:]
    , vals =dict(meme=json.dumps(uval, indent=2)
                ,cmnt=cmnt4v)
    , fid ='meme'
    ).show()
    return newv
#def edit_json_as_dict
class Command:
    """CudaText plugin entry points."""
    def dlg_cuda_options(self):
        """Show the options dialog; re-show it while handlers request a restart."""
        while True:
            OptEdD.restart = False
            self._dlg_opt()
            if not OptEdD.restart: break
    #def dlg_cuda_options
    def _dlg_opt(self):
        """Create and show OptEdD over default.json (or a configured defs file)."""
        if app.app_api_version()<MIN_API_VER: return app.msg_status(_('Need update CudaText'))
        defs_json = apx.get_opt('dlg_cuda_options.defs_json', 'default.json')
        # A bare filename is resolved against the default-settings directory
        defs_json = defs_json if os.sep in defs_json else apx.get_def_setting_dir()+os.sep+defs_json
        OptEdD(
            path_keys_info=defs_json
        , subset='df.'
        ).show(_('CudaText options'))
    #def _dlg_opt
#class Command
def add_to_history(val:str, lst:list, max_len=MAX_HIST, unicase=False)->list:
    """Move *val* to the head of history list *lst* (in place) and return it.

    With unicase=True matching against existing entries ignores case.
    The list is trimmed to at most *max_len* entries.
    """
    keys = [s.upper() for s in lst] if unicase else lst
    key = val.upper() if unicase else val
    if key in keys:
        pos = keys.index(key)
        if pos == 0:
            return lst              # already at the head — nothing to do
        del lst[pos]
    lst.insert(0, val)
    del lst[max_len:]               # no-op while the list is short enough
    return lst
#def add_to_history
# HTML boilerplate for do_report(): page head with inline CSS for the option
# tables (td.win marks the winning value, td.nxt an overridden one) and the
# matching footer.
RPT_HEAD = '''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>CudaText options</title>
<style type="text/css">
td, th, body {
color: #000;
font-family: Verdana, Arial, Helvetica, sans-serif;
font-size: 12px;
}
table {
border-width: 1px;
border-spacing: 2px;
border-color: gray;
border-collapse:collapse;
}
table td, table th{
border-width: 1px;
padding: 1px;
border-style: solid;
border-color: gray;
}
pre {
margin: 0;
padding: 0;
}
td.nxt {
color: grey;
word-break: break-all;
}
td.win {
font-weight: bold;
word-break: break-all;
}
</style>
</head>
<body>
'''
RPT_FOOT = '''
</body>
</html>
'''
def do_report(fn, lex='', ed_=ed):
    """Write an HTML report of effective option values to file *fn*.

    Sections: editor (file-level) options, overridden defaults, user-only and
    lexer-only options.  *lex* is the lexer settings filename (optional);
    *ed_* is the editor whose file-level values are reported.
    Returns True on success.
    """
    def hard_word_wrap(text, rmax):
        # Wrap lines longer than ~rmax chars, keeping each line's leading indent
        reShift = re.compile(r'\s*')
        reHeadTail = re.compile(r'(.{' + str(rmax) + r'}\S*)\s*(.*)')
        src_lines = text.splitlines()
        pass; #print('src_lines=',src_lines)
        trg_lines = []
        for line in src_lines:
            pass; #print('line=', line, 'len=', len(line.rstrip()))
            if len(line.rstrip()) <= rmax:
                trg_lines.append(line)
                continue
            shift = reShift.match(line).group(0)
            head, \
            tail = reHeadTail.match(line).group(1, 2)
            if not tail:
                # Whole line is one unbroken run: split off its last word
                tail= line.split()[-1]
                head= line[:-len(tail)]
            pass; #print('head=', head, 'tail=', tail)
            trg_lines.append(head)
            trg_lines.append(shift+tail)
        pass; #print('trg_lines=',trg_lines)
        return '\n'.join(trg_lines)
    #def hard_word_wrap
    # lex = ed_.get_prop(app.PROP_LEXER_CARET)
    def_json = apx.get_def_setting_dir() +os.sep+'default.json'
    usr_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+'user.json'
    lex_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+lex if lex else ''
    def_opts = apx._get_file_opts(def_json, {}, object_pairs_hook=collections.OrderedDict)
    usr_opts = apx._get_file_opts(usr_json, {}, object_pairs_hook=collections.OrderedDict)
    lex_opts = apx._get_file_opts(lex_json, {}, object_pairs_hook=collections.OrderedDict) if lex else None
    def_opts = pickle.loads(pickle.dumps(def_opts)) # clone to pop
    usr_opts = pickle.loads(pickle.dumps(usr_opts)) # clone to pop
    lex_opts = pickle.loads(pickle.dumps(lex_opts)) if lex else {} # clone to pop
    fil_opts = {op:ed_.get_prop(pr) for op,pr in apx.OPT2PROP.items()}
    # fil_opts = get_ovrd_ed_opts(ed)
    cmt_opts = {}
    # Find Commentary for def opts in def file
    # Rely: _commentary_ is some (0+) lines between opt-line and prev opt-line
    def_body = open(def_json).read()
    def_body = def_body.replace('\r\n', '\n').replace('\r', '\n')
    def_body = def_body[def_body.find('{')+1:] # Cut head with start '{'
    def_body = def_body.lstrip()
    for opt in def_opts.keys():
        pos_opt = def_body.find('"{}"'.format(opt))
        cmt = def_body[:pos_opt].strip()
        cmt = ('\n\n'+cmt).split('\n\n')[-1]
        cmt = re.sub('^\s*//', '', cmt, flags=re.M)
        cmt = cmt.strip()
        cmt_opts[opt] = html.escape(cmt)
        def_body= def_body[def_body.find('\n', pos_opt)+1:] # Cut the opt
    with open(fn, 'w', encoding='utf8') as f:
        f.write(RPT_HEAD)
        # Section 1: file-level (editor) options — highest priority
        f.write('<h4>High priority: editor options</h4>\n')
        f.write('<table>\n')
        f.write( '<tr>\n')
        f.write( '<th>Option name</th>\n')
        f.write( '<th>Value in<br>default</th>\n')
        f.write( '<th>Value in<br>user</th>\n')
        f.write( '<th>Value in<br>{}</th>\n'.format(lex)) if lex else None
        f.write( '<th title="{}">Value for file<br>{}</th>\n'.format(ed_.get_filename()
                , os.path.basename(ed_.get_filename())))
        f.write( '<th>Comment</th>\n')
        f.write( '</tr>\n')
        for opt in fil_opts.keys():
            # The winner is the highest level at which the option is present
            winner = 'def'
            winner = 'usr' if opt in usr_opts else winner
            winner = 'lex' if opt in lex_opts else winner
            winner = 'fil' if opt in fil_opts else winner
            f.write( '<tr>\n')
            f.write( '<td>{}</td>\n'.format(opt))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='def' else 'nxt', def_opts.get(opt, '')))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='usr' else 'nxt', usr_opts.get(opt, '')))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='lex' else 'nxt', lex_opts.get(opt, ''))) if lex else None
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='fil' else 'nxt', fil_opts.get(opt, '')))
            # f.write( '<td><pre>{}</pre></td>\n'.format(cmt_opts.get(opt, '')))
            f.write( '<td><pre>{}</pre></td>\n'.format(hard_word_wrap(cmt_opts.get(opt, ''), 50)))
            f.write( '</tr>\n')
            def_opts.pop(opt, None)
            usr_opts.pop(opt, None)
            lex_opts.pop(opt, None) if lex else None
        f.write('</table><br/>\n')
        # Section 2: remaining default options (possibly overridden)
        f.write('<h4>Overridden default options</h4>\n')
        f.write('<table>\n')
        f.write( '<tr>\n')
        f.write( '<th width="15%">Option name</th>\n')
        f.write( '<th width="20%">Value in<br>default</th>\n')
        f.write( '<th width="20%">Value in<br>user</th>\n')
        f.write( '<th width="10%">Value in<br>{}<br></th>\n'.format(lex)) if lex else None
        f.write( '<th width="35%">Comment</th>\n')
        f.write( '</tr>\n')
        for opt in def_opts.keys():
            winner = 'def'
            winner = 'usr' if opt in usr_opts else winner
            winner = 'lex' if opt in lex_opts else winner
            winner = 'fil' if opt in fil_opts else winner
            f.write( '<tr>\n')
            f.write( '<td>{}</td>\n'.format(opt))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='def' else 'nxt', def_opts.get(opt, '')))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='usr' else 'nxt', usr_opts.get(opt, '')))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='lex' else 'nxt', lex_opts.get(opt, ''))) if lex else None
            f.write( '<td><pre>{}</pre></td>\n'.format(hard_word_wrap(cmt_opts.get(opt, ''), 50)))
            f.write( '</tr>\n')
            usr_opts.pop(opt, None)
            lex_opts.pop(opt, None) if lex else None
        f.write('</table><br/>\n')
        # Section 3: options present only in user.json (and then lexer-only)
        f.write('<h4>Overridden user-only options</h4>')
        f.write('<table>\n')
        f.write( '<tr>\n')
        f.write( '<th>Option name</th>\n')
        f.write( '<th>Value in<br>user</th>\n')
        f.write( '<th>Value in<br>{}</th>\n'.format(lex)) if lex else None
        f.write( '<th>Comment</th>\n')
        f.write( '</tr>\n')
        for opt in usr_opts.keys():
            winner = 'usr'
            winner = 'lex' if opt in lex_opts else winner
            f.write( '<tr>\n')
            f.write( '<td>{}</td>\n'.format(opt))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='usr' else 'nxt', usr_opts.get(opt, '')))
            f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='lex' else 'nxt', lex_opts.get(opt, ''))) if lex else None
            f.write( '<td><pre>{}</pre></td>\n'.format(cmt_opts.get(opt, '')))
            f.write( '</tr>\n')
            lex_opts.pop(opt, None) if lex else None
        for opt in lex_opts.keys():
            winner = 'lex'
            f.write( '<tr>\n')
            f.write( '<td>{}</td>\n'.format(opt))
            f.write( '<td class="{}"></td> \n'.format('non'))
            f.write( '<td class="{}">{}</td>\n'.format('win', lex_opts.get(opt, '')))
            f.write( '<td><pre>{}</pre></td>\n'.format(cmt_opts.get(opt, '')))
            f.write( '</tr>\n')
            lex_opts.pop(opt, None)
        f.write('</table><br/>\n')
        f.write(RPT_FOOT)
    return True
#def do_report(fn):
def index_1(cllc, val, defans=-1):
    """Return the first index of *val* in *cllc*, or *defans* if it is absent."""
    if val in cllc:
        return cllc.index(val)
    return defans
if __name__ == '__main__' : # Tests
    # The test-runner code is kept commented out; the plugin is normally
    # loaded by CudaText, not run as a script.
    # To start the tests run in Console
    # exec(open(path_to_the_file, encoding="UTF-8").read())
    # app.app_log(app.LOG_CONSOLE_CLEAR, 'm')
    # for smk in [smk for smk
    # in sys.modules if 'cuda_options_editor.tests.test_options_editor' in smk]:
    # del sys.modules[smk] # Avoid old module
    # import cuda_options_editor.tests.test_options_editor
    # import unittest
    # suite = unittest.TestLoader().loadTestsFromModule( cuda_options_editor.tests.test_options_editor)
    # unittest.TextTestRunner(verbosity=0).run(suite)
    pass
'''
ToDo
[+][kv-kv][02apr17] History for cond
[-][kv-kv][02apr17] ? Chapters list and "chap" attr into kinfo
[-][kv-kv][02apr17] ? Tags list and "tag" attr into kinfo
[-][kv-kv][02apr17] ? Delimeter row in table
[ ][kv-kv][02apr17] "Need restart" in Comments
[+][kv-kv][02apr17] ? Calc Format by Def_val
[ ][kv-kv][02apr17] int_mm for min+max
[+][kv-kv][02apr17] VERS in Title
[+][at-kv][02apr17] 'enum' вместо 'enum_i'
[ ][kv-kv][02apr17] Save top row in table
[+][kv-kv][03apr17] Show stat in Chap-combo and tags check-list
[-][kv-kv][03apr17] ? Add chap "(No chapter)"
[-][kv-kv][03apr17] ? Add tag "#no_tag"
[+][kv-kv][03apr17] Call opts report
[+][at-kv][04apr17] Format 'font'
[-][at-kv][04apr17] ? FilterListView
[+][at-kv][04apr17] use new default.json
[-][kv-kv][04apr17] Testing for update user.json
[+][kv-kv][04apr17] Restore Sec and Tags
[+][kv-kv][04apr17] ro-combo hitory for Tags
[+][kv-kv][05apr17] Add "default" to fonts if def_val=="default"
[+][at-kv][05apr17] Preview for format=fontmay
[+][kv-kv][06apr17] Spec filter sign: * - to show only modified
[-][kv-kv][06apr17] Format color
[+][kv-kv][24apr17] Sort as Def or as User
[+][kv-kv][05may17] New type "list of str"
[ ][kv-kv][23jun17] ? Filter with tag (part of tag?). "smth #my"
[+][kv-kv][15mar18] ? Filter with all text=key+comment
[+][kv-kv][19mar18] ? First "+" to filter with comment
[-][kv-kv][19mar18] !! Point the fact if value is overed in ed
[?][kv-kv][20mar18] Allow to add/remove opt in user/lex
[?][kv-kv][21mar18] ? Allow to meta keys in user.json:
"_fif_LOG__comment":"Comment for fif_LOG"
[+][kv-kv][22mar18] Set conrol's tab_order to always work Alt+E for "Valu&e"
[ ][kv-kv][26mar18] Use 'editor' for comment
[+][kv-kv][26mar18] Increase w for one col when user increases w of dlg (if no h-scroll)
[+][kv-kv][13apr18] DClick on Def-col - focus to Reset
[-][kv-kv][16apr18] Open in tag for fmt=json
[?][kv-kv][23apr18] ? Show opt from cur line if ed(default.json)
[+][at-kv][03may18] Rework ask to confirm removing user/lex opt
[+][at-kv][04may18] Report to console all changes
[+][at-kv][05may18] Call OpsReloadAndApply
[+][kv-kv][05may18] Rework radio to checks (Linux bug: always set one of radio-buttons)
[-][kv-kv][05may18] Ask "Set also for current file?" if ops is ed.prop
[+][kv-kv][06may18] Menu command "Show changes"
[+][kv-kv][06may18] Show all file opt value. !!! only if val!=over-val
[+][kv-kv][06may18] Rework Sort
[+][kv-kv][14may18] Scale def col widths
[ ][at-kv][14may18] DClick over 1-2-3 is bad
[+][at-kv][14may18] Allow to refresh table on each changing of filter
[+][at-kv][15may18] Allow to extra sort cols with Ctrl+Click
[ ][kv-kv][04jun18] Cannot select section @Ui after selected @Ui/Tabs
[ ][kv-kv][16jun18] Have 2 filter control to instant and history. Switch by vis
[+][kv-kv][18jun18] More then one chap in filter. Append from menu if Ctrl holds
[+][at-kv][24apr19] Add types: rgb
[ ][at-kv][24apr19] Add type condition: int/float range
[+][kv-kv][25apr19] Hide cols "Lexer" and "File", controls []For and lexer list (by init opt)
[+][kv-kv][25apr19] Allow store other then user.json
[+][kv-kv][25apr19] Return 'was modified' from show()
''' | vhanla/CudaText | app/py/cuda_options_editor/cd_opts_dlg.py | Python | mpl-2.0 | 113,411 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from __future__ import absolute_import
from django.core.exceptions import ValidationError
import pytest
from pytest_pootle.factories import VirtualFolderDBFactory
from pootle_language.models import Language
from pootle_store.models import Store
from virtualfolder.models import VirtualFolder
@pytest.mark.django_db
def test_vfolder_priority_not_greater_than_zero(tp0):
    """Tests that the creation of a virtual folder fails if the provided
    priority is not greater than zero.
    """
    # Test priority less than zero.
    vfolder_item = {
        'name': "whatever",
        'priority': -3,
        'is_public': True,
        'filter_rules': "browser/defines.po",
    }
    with pytest.raises(ValidationError) as excinfo:
        VirtualFolder.objects.create(**vfolder_item)
    assert u'Priority must be greater than zero.' in str(excinfo.value)
    # Test zero priority.
    # Create with a valid priority first, then update to 0 — saving an
    # existing folder must be validated too.
    vfolder_item['priority'] = 1
    vfolder = VirtualFolder.objects.create(**vfolder_item)
    vfolder.priority = 0
    with pytest.raises(ValidationError) as excinfo:
        vfolder.save()
    assert u'Priority must be greater than zero.' in str(excinfo.value)
@pytest.mark.django_db
def test_vfolder_with_no_filter_rules():
    """Tests that the creation of a virtual folder fails if it doesn't have any
    filter rules.
    """
    vfolder_item = {
        'name': "whatever",
        'priority': 4,
        'is_public': True,
        'filter_rules': "",
    }
    with pytest.raises(ValidationError) as excinfo:
        VirtualFolder.objects.create(**vfolder_item)
    assert u'Some filtering rule must be specified.' in str(excinfo.value)
    # Clearing the rules on an existing folder must fail validation as well.
    vfolder_item["filter_rules"] = "FOO"
    vf = VirtualFolder.objects.create(**vfolder_item)
    vf.filter_rules = ""
    with pytest.raises(ValidationError) as excinfo:
        vf.save()
    assert u'Some filtering rule must be specified.' in str(excinfo.value)
@pytest.mark.django_db
def test_vfolder_membership(tp0, store0):
    """A vfolder's stores are derived from its comma-separated filter_rules;
    editing the rules and saving updates the membership accordingly."""
    tp0_stores = ",".join(
        p[len(tp0.pootle_path):]
        for p in tp0.stores.values_list("pootle_path", flat=True))
    vf0 = VirtualFolder.objects.create(
        name="vf0",
        title="the vf0",
        filter_rules=store0.name)
    vf0.projects.add(tp0.project)
    vf0.languages.add(tp0.language)
    vf0.save()
    assert vf0.stores.count() == 1
    assert vf0.stores.first() == store0
    vf0 = VirtualFolder.objects.create(
        name="vf1",
        title="the vf1",
        filter_rules=tp0_stores)
    vf0.projects.add(tp0.project)
    vf0.languages.add(tp0.language)
    vf0.save()
    assert (
        list(vf0.stores.order_by("pk"))
        == list(tp0.stores.order_by("pk")))
    # Dropping the first rule removes its store from the folder...
    store_name = vf0.filter_rules.split(",")[0]
    vf0.filter_rules = ",".join(vf0.filter_rules.split(",")[1:])
    store = vf0.stores.get(name=store_name)
    vf0.save()
    assert store not in vf0.stores.all()
    # ...and adding it back restores membership.
    vf0.filter_rules = ",".join([store_name, vf0.filter_rules])
    vf0.save()
    assert store in vf0.stores.all()
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_store_priorities(project0):
    """Store.priority follows the highest-priority vfolder containing the
    store, and falls back to 1.0 for stores outside any vfolder."""
    # remove the default vfolders and update units to reset priorities
    VirtualFolder.objects.all().delete()
    assert all(
        priority == 1
        for priority
        in Store.objects.values_list("priority", flat=True))
    vfolder0 = VirtualFolderDBFactory(filter_rules="store0.po", name="FOO")
    vfolder0.priority = 3
    vfolder0.save()
    vfolder0_stores = vfolder0.stores.values_list("pk", flat=True)
    assert all(
        priority == 3
        for priority
        in Store.objects.filter(id__in=vfolder0_stores)
                        .values_list("priority", flat=True))
    assert all(
        priority == 1.0
        for priority
        in Store.objects.exclude(id__in=vfolder0_stores)
                        .values_list("priority", flat=True))
    # Retargeting the folder's rules moves the priority to the new stores
    vfolder0.filter_rules = "store1.po"
    vfolder0.save()
    vfolder0_stores = vfolder0.stores.values_list("pk", flat=True)
    assert all(
        priority == 3
        for priority
        in Store.objects.filter(id__in=vfolder0_stores)
                        .values_list("priority", flat=True))
    assert all(
        priority == 1.0
        for priority
        in Store.objects.exclude(id__in=vfolder0_stores)
                        .values_list("priority", flat=True))
    # A second, higher-priority folder over the same stores wins
    vfolder1 = VirtualFolderDBFactory(
        filter_rules="store1.po")
    vfolder1.languages.add(*Language.objects.all())
    vfolder1.projects.add(project0)
    vfolder1.priority = 4
    vfolder1.save()
    vfolder1_stores = vfolder1.stores.values_list("pk", flat=True)
    assert all(
        priority == 4.0
        for priority
        in Store.objects.filter(id__in=vfolder1_stores)
                        .values_list("priority", flat=True))
    assert all(
        priority == 3.0
        for priority
        in Store.objects.filter(id__in=vfolder0_stores)
                        .exclude(id__in=vfolder1_stores)
                        .values_list("priority", flat=True))
    assert all(
        priority == 1.0
        for priority
        in Store.objects.exclude(id__in=vfolder0_stores)
                        .exclude(id__in=vfolder1_stores)
                        .values_list("priority", flat=True))
@pytest.mark.django_db
def test_virtualfolder_repr():
    """repr() of a virtual folder embeds its name."""
    folder = VirtualFolderDBFactory(filter_rules="store0.po")
    expected = "<VirtualFolder: %s>" % folder.name
    assert repr(folder) == expected
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_calc_priority(settings, store0):
    """Store priority follows its vfolder, but only while the
    ``virtualfolder`` app is installed.
    """
    vf = store0.vfolders.first()
    vf.priority = 5
    vf.save()
    assert store0.calculate_priority() == 5.0
    # Temporarily disable the app.  Restore it in ``finally`` so a failing
    # assertion does not leave the global settings mutated for other tests.
    settings.INSTALLED_APPS.remove("virtualfolder")
    try:
        assert store0.calculate_priority() == 1.0
    finally:
        settings.INSTALLED_APPS.append("virtualfolder")
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_membership_new_store(tp0):
    """A newly created store is captured by a matching vfolder, and its
    priority tracks the folder's lifetime.
    """
    folder = VirtualFolder.objects.create(
        name="vf0",
        title="the vf0",
        priority=7.0,
        all_languages=True,
        all_projects=True,
        filter_rules="wierd.po")
    matching = Store.objects.create(
        parent=tp0.directory,
        translation_project=tp0,
        name="wierd.po")
    matching.set_priority()
    assert matching in folder.stores.all()
    assert Store.objects.get(pk=matching.pk).priority == 7
    # A store whose name does not match the filter rules stays out.
    other = Store.objects.create(
        parent=tp0.directory,
        translation_project=tp0,
        name="normal.po")
    assert other not in folder.stores.all()
    assert Store.objects.get(pk=other.pk).priority == 1.0
    # Deleting the folder resets the matched store's priority.
    folder.delete()
    assert Store.objects.get(pk=matching.pk).priority == 1.0
| claudep/pootle | tests/models/virtualfolder.py | Python | gpl-3.0 | 7,012 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for ``scattergl.marker.colorbar.title.font.color``."""

    def __init__(
        self,
        plotly_name="color",
        parent_name="scattergl.marker.colorbar.title.font",
        **kwargs
    ):
        # Default the edit type to "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/scattergl/marker/colorbar/title/font/_color.py | Python | mit | 456 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GridRNN cells
## This package provides classes for GridRNN
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/grid_rnn/__init__.py | Python | bsd-2-clause | 1,060 |
"""
"""
import os
import tempfile
import unittest
from .context import fabulist
class BasicTestSuite(unittest.TestCase):
    """Basic test cases."""
    def setUp(self):
        # Fresh Fabulist per test; ``temp_path`` is only set by tests that
        # write a file, and is cleaned up in tearDown().
        self.fab = fabulist.Fabulist()
        self.temp_path = None
    def tearDown(self):
        self.fab = None
        if self.temp_path:
            os.remove(self.temp_path)
    def test_basic(self):
        """Word lists are loaded lazily on first word access."""
        fab = self.fab
        word_list = fab.list_map["noun"]
        assert len(word_list.key_list) == 0
        fab.get_word("noun")
        assert len(word_list.key_list) > 0
    def test_quotes(self):
        """The quotes demo runs without raising."""
        from .demo import demo_quotes
        demo_quotes()
    def test_names(self):
        """Name generation honors the ``:first`` / ``:last`` modifiers."""
        name = self.fab.get_name()
        assert len(name) > 2 and " " in name, "Names have first and last as default"
        name = self.fab.get_name(":first")
        assert " " not in name, "name:first does not include :last"
        name = self.fab.get_name(":last")
        assert " " not in name, "name:last does not include :first"
    def test_pick(self):
        """``$(pick:...)`` chooses among options or single characters."""
        for _ in range(100):
            val = self.fab.get_quote("$(pick:foo,bar,b\,az)")
            assert val in ("foo", "bar", "b,az"), "Pick value"
            val = self.fab.get_quote("$(pick:abc)")
            assert val in ("a", "b", "c"), "Pick character"
            val = self.fab.get_quote("$(pick:!\\,\\:)")
            assert val in ("!", ",", ":"), "Pick special character"
        return
    def test_numbers(self):
        """``$(num:min,max,width)`` yields in-range, zero-padded numbers."""
        for _ in range(100):
            num = self.fab.get_quote("$(num)")
            assert 0 <= int(num) <= 99, "Default number: 0..99"
            num = self.fab.get_quote("$(num:-9,-1)")
            assert len(num) == 2
            assert num[0] == "-"
            num = self.fab.get_quote("$(num:1,999,3)")
            assert 1 <= int(num) <= 999, "Default number: 1..999"
            assert len(num) == 3, "Zeropadding"
            num = self.fab.get_quote("$(num:1,9,3)")
            assert 1 <= int(num) <= 9, "Default number: 1..9"
            assert len(num) == 3, "Zeropadding"
            assert num[0] == num[1] == "0"
        return
    def test_validations(self):
        """Invalid word types, modifiers, and tags raise ValueError."""
        with self.assertRaises(ValueError):
            self.fab.get_word("unkonwn_type")
        with self.assertRaises(ValueError):
            self.fab.get_word("noun", "unkonwn_mod")
        with self.assertRaises(ValueError):
            self.fab.get_word("noun", "an:an")
        with self.assertRaises(ValueError):
            self.fab.get_word("noun", "an:#animal:#animal")
        with self.assertRaises(ValueError):
            self.fab.get_word("noun", "#unknown_tag")
    def test_to_string(self):
        """String conversion of word lists and macros is stable."""
        s = "{}".format(self.fab.list_map["adj"])
        assert s.startswith("AdjList(len=")
        nl = self.fab.list_map["noun"]
        s = "{}".format(fabulist.fabulist.Macro("noun", "an:plural:#animal", nl))
        assert s == "$(noun:plural:an:#animal)"
    def test_save(self):
        """A loaded word list can be saved to a (temporary) file."""
        self.temp_path = tempfile.mktemp()
        wl = self.fab.list_map["adj"]
        self.fab.load()
        wl.save_as(self.temp_path)
        assert os.path.getsize(self.temp_path) > 1000
    def test_lorem_sentence(self):
        """Lorem sentences respect entropy, word count, and dialect."""
        fab = self.fab
        res = fab.get_lorem_sentence(entropy=0)
        assert res.startswith("Lorem ipsum")
        res = fab.get_lorem_sentence(entropy=1)
        assert res[0].isupper() and res[-1] == "."
        res = fab.get_lorem_sentence(entropy=2)
        assert res[0].isupper() and res[-1] == "."
        res = fab.get_lorem_sentence(10)
        assert res[0].isupper() and res[-1] == "."
        assert res.count(" ") == 9
        res = fab.get_lorem_sentence(dialect="pulp", entropy=0)
        assert res == "Do you see any Teletubbies in here?"
    def test_lorem_paragraph(self):
        """Paragraphs contain the requested number of sentences."""
        fab = self.fab
        res = fab.get_lorem_paragraph(3, entropy=0)
        assert res.startswith("Lorem ipsum")
        assert res.count(".") == 3
        res = fab.get_lorem_paragraph(3, entropy=1)
        assert res.count(".") == 3
        res = fab.get_lorem_paragraph(3, entropy=2, keep_first=True)
        assert res.startswith("Lorem ipsum")
        assert res.count(".") == 3
        res = fab.get_lorem_paragraph(3, entropy=3, keep_first=True)
        assert res.startswith("Lorem ipsum")
        assert res.count(".") == 3
    def test_lorem_text(self):
        """Multi-paragraph text separates paragraphs with newlines."""
        fab = self.fab
        res = fab.get_lorem_text(3, keep_first=True, entropy=3)
        assert res.count("\n") == 2
        assert res.startswith("Lorem ipsum")
    def test_lorem_demo(self):
        """The lorem demo runs without raising."""
        from .demo_lorem import demo_lorem
        demo_lorem()
    def test_infinite(self):
        """``count=None`` streams forever; dedupe must eventually fail."""
        # We can produce endless quotes
        for i, quote in enumerate(
            self.fab.generate_quotes("$(noun)", count=None, dedupe=False)
        ):
            if i > 5000:
                break
        # but dedupe may raise RuntimeError
        with self.assertRaises(RuntimeError):
            for i, s in enumerate(
                self.fab.generate_quotes("$(noun)", count=None, dedupe=True)
            ):
                if i > 5000:
                    break
class LoremTestSuite(unittest.TestCase):
    """Test LoremGenerator and LoremDialect."""
    def setUp(self):
        # The lorem generator is owned by the Fabulist instance.
        self.fab = fabulist.Fabulist()
        self.lorem = self.fab.lorem
    def tearDown(self):
        self.lorem = None
    def test_validations(self):
        """Unknown dialect names raise ValueError."""
        with self.assertRaises(ValueError):
            list(self.lorem.generate_words(2, dialect="unknown_dialect"))
    def test_words(self):
        """generate_words yields the requested count at every entropy level."""
        lorem = self.lorem
        dialect = lorem._get_lorem("ipsum")
        res = list(lorem.generate_words(3, entropy=0))
        assert len(res) == 3
        # entropy=0 reproduces the start of the dialect's first sentence
        assert dialect.sentences[0].lower().startswith(res[0])
        res = list(self.lorem.generate_words(3, entropy=1))
        assert len(res) == 3
        res = list(self.lorem.generate_words(3, entropy=2))
        assert len(res) == 3
        res = list(self.lorem.generate_words(3, entropy=3))
        assert len(res) == 3
    def test_sentences(self):
        """generate_sentences yields the count; keep_first pins the opener."""
        res = list(self.lorem.generate_sentences(3, entropy=0))
        assert len(res) == 3
        assert res[0].startswith("Lorem ipsum")
        res = list(self.lorem.generate_sentences(3, entropy=2, keep_first=True))
        assert len(res) == 3
        assert res[0].startswith("Lorem ipsum")
        res = list(
            self.lorem.generate_sentences(
                3, keep_first=True, words_per_sentence=(3, 10)
            )
        )
        assert len(res) == 3
        assert res[0].startswith("Lorem ipsum")
        res = list(self.lorem.generate_sentences(3, entropy=3, keep_first=True))
        assert len(res) == 3
    def test_paragraphs(self):
        """generate_paragraphs yields the count; keep_first pins the opener."""
        res = list(self.lorem.generate_paragraphs(3, entropy=0))
        assert len(res) == 3
        assert res[0].startswith("Lorem ipsum")
        res = list(self.lorem.generate_paragraphs(3, entropy=2, keep_first=True))
        assert len(res) == 3
        assert res[0].startswith("Lorem ipsum")
    def test_infinite(self):
        """Without a count, the word generator streams indefinitely."""
        # Words are flowing out like endless rain into a paper cup...
        for i, word in enumerate(self.lorem.generate_words()):
            if i > 1000:
                break
if __name__ == "__main__":
    # NOTE(review): this module uses relative imports, so direct execution
    # presumably only works via the package (e.g. ``python -m``) — confirm.
    unittest.main()
| mar10/fabulist | tests/test.py | Python | mit | 7,326 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two cookie-name settings to SiteConfiguration.  The ``b''``
    # defaults are bytes literals (a Python 2 artifact); they are kept
    # byte-identical here because editing an applied migration would change
    # its recorded state.
    dependencies = [
        ('core', '0018_siteconfiguration_payment_support_url'),
    ]
    operations = [
        migrations.AddField(
            model_name='siteconfiguration',
            name='affiliate_cookie_name',
            field=models.CharField(default=b'', help_text='Name of cookie storing affiliate data.', max_length=255, verbose_name='Affiliate Cookie Name', blank=True),
        ),
        migrations.AddField(
            model_name='siteconfiguration',
            name='utm_cookie_name',
            field=models.CharField(default=b'', help_text='Name of cookie storing UTM data.', max_length=255, verbose_name='UTM Cookie Name', blank=True),
        ),
    ]
| eduNEXT/edunext-ecommerce | ecommerce/core/migrations/0019_auto_20161012_1404.py | Python | agpl-3.0 | 794 |
import ckan.plugins as p
import paste.script
import db
import logging
from ckan.lib.cli import CkanCommand
from ckan import model
log = logging.getLogger(__name__)
class WebhookCommands(CkanCommand):
    """
    ckanext-webhooks commands:
    Usage::
        paster webhooks migrate
    """
    # summary/usage are derived from the class docstring (paster convention).
    summary = __doc__.split('\n')[0]
    usage = __doc__
    parser = paste.script.command.Command.standard_parser(verbose=True)
    parser.add_option('-c', '--config', dest='config',
                      default='development.ini', help='Config file to use.')
    def command(self):
        # Entry point invoked by paster: dispatch on the first positional
        # argument; print usage when no/unknown command is given.
        if not len(self.args):
            print self.__doc__
            return
        cmd = self.args[0]
        self._load_config()
        if cmd == 'migrate':
            self._migrate()
        else:
            print self.__doc__
    def _migrate(self):
        # Create the webhooks table if it does not exist yet (idempotent).
        if not db.webhook_table.exists():
            db.webhook_table.create()
            log.info('Webhooks table created')
        else:
            log.warning('Webhooks table already exists')
            print 'Webhooks table already exists'
| deniszgonjanin/ckanext-webhooks | ckanext/webhooks/commands.py | Python | agpl-3.0 | 1,082 |
"""
Offer template automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#template-trigger
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import CONF_VALUE_TEMPLATE, CONF_PLATFORM
from homeassistant.helpers.event import async_track_template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# The same schema serves both the trigger and the condition ("if")
# configuration: only a mandatory template value is accepted.
TRIGGER_SCHEMA = IF_ACTION_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): 'template',
    vol.Required(CONF_VALUE_TEMPLATE): cv.template,
})
def async_trigger(hass, config, action):
    """Listen for template state changes and run the action on a match."""
    value_template = config.get(CONF_VALUE_TEMPLATE)
    value_template.hass = hass

    @callback
    def template_listener(entity_id, from_s, to_s):
        """Forward the matched state change to the automation action."""
        trigger_info = {
            'trigger': {
                'platform': 'template',
                'entity_id': entity_id,
                'from_state': from_s,
                'to_state': to_s,
            },
        }
        hass.async_run_job(action, trigger_info)

    return async_track_template(hass, value_template, template_listener)
| kyvinh/home-assistant | homeassistant/components/automation/template.py | Python | apache-2.0 | 1,267 |
# Connection settings used by the unit tests.
vm_ip = '192.168.76.50'  # management IP of the test device
username = 'admin'
password = ''  # NOTE(review): blank password — presumably the lab default; confirm
config_file_1 = 'fortios_config_1.txt'  # sample config fixtures
config_file_2 = 'fortios_config_2.txt'
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.coordinates.attributes import TimeAttribute, QuantityAttribute
from .utils import EQUINOX_J2000, DEFAULT_OBSTIME
__all__ = ['GeocentricMeanEcliptic', 'BarycentricMeanEcliptic',
'HeliocentricMeanEcliptic', 'BaseEclipticFrame',
'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic',
'HeliocentricTrueEcliptic',
'HeliocentricEclipticIAU76', 'CustomBarycentricEcliptic']
# Template for the per-component docstring shared by all ecliptic frames;
# ``{0}`` is replaced with the name of the frame's origin (geocenter,
# barycenter, sun's center, ...) via str.format below.
doc_components_ecl = """
    lon : `~astropy.coordinates.Angle`, optional, must be keyword
        The ecliptic longitude for this object (``lat`` must also be given and
        ``representation`` must be None).
    lat : `~astropy.coordinates.Angle`, optional, must be keyword
        The ecliptic latitude for this object (``lon`` must also be given and
        ``representation`` must be None).
    distance : `~astropy.units.Quantity`, optional, must be keyword
        The distance for this object from the {0}.
        (``representation`` must be None).
    pm_lon_coslat : `~astropy.coordinates.Angle`, optional, must be keyword
        The proper motion in the ecliptic longitude (including the ``cos(lat)``
        factor) for this object (``pm_lat`` must also be given).
    pm_lat : `~astropy.coordinates.Angle`, optional, must be keyword
        The proper motion in the ecliptic latitude for this object
        (``pm_lon_coslat`` must also be given).
    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
        The radial velocity of this object.
"""
@format_doc(base_doc,
            components=doc_components_ecl.format('specified location'),
            footer="")
class BaseEclipticFrame(BaseCoordinateFrame):
    """
    A base class for frames that have names and conventions like those of
    ecliptic frames.
    .. warning::
            In the current version of astropy, the ecliptic frames do not yet have
            stringent accuracy tests. We recommend you test to "known-good" cases
            to ensure these frames are what you are looking for. (and then ideally
            you would contribute these tests to Astropy!)
    """
    # Spherical lon/lat/distance with cos(lat)-scaled differentials is the
    # natural representation for ecliptic coordinates.
    default_representation = r.SphericalRepresentation
    default_differential = r.SphericalCosLatDifferential
# "Other parameters" footer shared by the geocentric frames below.
doc_footer_geo = """
    Other parameters
    ----------------
    equinox : `~astropy.time.Time`, optional
        The date to assume for this frame. Determines the location of the
        x-axis and the location of the Earth (necessary for transformation to
        non-geocentric systems). Defaults to the 'J2000' equinox.
    obstime : `~astropy.time.Time`, optional
        The time at which the observation is taken. Used for determining the
        position of the Earth. Defaults to J2000.
"""
@format_doc(base_doc, components=doc_components_ecl.format('geocenter'),
            footer=doc_footer_geo)
class GeocentricMeanEcliptic(BaseEclipticFrame):
    """
    Geocentric mean ecliptic coordinates. The origin of the coordinates is the
    geocenter (Earth), with the x axis pointing to the *mean* (not true) equinox
    at the time specified by the ``equinox`` attribute, and the xy-plane in the
    plane of the ecliptic for that date.
    Be aware that the definition of "geocentric" here means that this frame
    *includes* light deflection from the sun, aberration, etc when transforming
    to/from e.g. ICRS.
    The frame attributes are listed under **Other Parameters**.
    """
    equinox = TimeAttribute(default=EQUINOX_J2000)
    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format('geocenter'),
            footer=doc_footer_geo)
class GeocentricTrueEcliptic(BaseEclipticFrame):
    """
    Geocentric true ecliptic coordinates. The origin of the coordinates is the
    geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
    at the time specified by the ``equinox`` attribute, and the xy-plane in the
    plane of the ecliptic for that date.
    Be aware that the definition of "geocentric" here means that this frame
    *includes* light deflection from the sun, aberration, etc when transforming
    to/from e.g. ICRS.
    The frame attributes are listed under **Other Parameters**.
    """
    equinox = TimeAttribute(default=EQUINOX_J2000)
    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
# "Other parameters" footer shared by the barycentric frames below.
doc_footer_bary = """
    Other parameters
    ----------------
    equinox : `~astropy.time.Time`, optional
        The date to assume for this frame. Determines the location of the
        x-axis and the location of the Earth and Sun.
        Defaults to the 'J2000' equinox.
"""
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"),
            footer=doc_footer_bary)
class BarycentricMeanEcliptic(BaseEclipticFrame):
    """
    Barycentric mean ecliptic coordinates. The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *mean* (not true) equinox as at the time specified by the ``equinox``
    attribute (as seen from Earth), and the xy-plane in the plane of the
    ecliptic for that date.
    The frame attributes are listed under **Other Parameters**.
    """
    equinox = TimeAttribute(default=EQUINOX_J2000)
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"),
            footer=doc_footer_bary)
class BarycentricTrueEcliptic(BaseEclipticFrame):
    """
    Barycentric true ecliptic coordinates. The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *true* (not mean) equinox as at the time specified by the ``equinox``
    attribute (as seen from Earth), and the xy-plane in the plane of the
    ecliptic for that date.
    The frame attributes are listed under **Other Parameters**.
    """
    equinox = TimeAttribute(default=EQUINOX_J2000)
# "Other parameters" footer shared by the heliocentric frames below.
doc_footer_helio = """
    Other parameters
    ----------------
    equinox : `~astropy.time.Time`, optional
        The date to assume for this frame. Determines the location of the
        x-axis and the location of the Earth and Sun.
        Defaults to the 'J2000' equinox.
    obstime : `~astropy.time.Time`, optional
        The time at which the observation is taken. Used for determining the
        position of the Sun. Defaults to J2000.
"""
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"),
            footer=doc_footer_helio)
class HeliocentricMeanEcliptic(BaseEclipticFrame):
    """
    Heliocentric mean ecliptic coordinates. The origin of the coordinates is the
    center of the sun, with the x axis pointing in the direction of
    the *mean* (not true) equinox as at the time specified by the ``equinox``
    attribute (as seen from Earth), and the xy-plane in the plane of the
    ecliptic for that date.
    The frame attributes are listed under **Other Parameters**.
    """
    equinox = TimeAttribute(default=EQUINOX_J2000)
    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"),
            footer=doc_footer_helio)
class HeliocentricTrueEcliptic(BaseEclipticFrame):
    """
    Heliocentric true ecliptic coordinates. The origin of the coordinates is the
    center of the sun, with the x axis pointing in the direction of
    the *true* (not mean) equinox as at the time specified by the ``equinox``
    attribute (as seen from Earth), and the xy-plane in the plane of the
    ecliptic for that date.
    The frame attributes are listed under **Other Parameters**.
    """
    equinox = TimeAttribute(default=EQUINOX_J2000)
    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"),
            footer="")
class HeliocentricEclipticIAU76(BaseEclipticFrame):
    """
    Heliocentric mean (IAU 1976) ecliptic coordinates. The origin of the
    coordinates is the center of the sun, with the x axis pointing in the
    direction of the *mean* (not true) equinox of J2000, and the xy-plane in
    the plane of the ecliptic of J2000 (according to the IAU 1976/1980
    obliquity model).
    It has, therefore, a fixed equinox and an older obliquity value
    than the rest of the frames.
    The frame attributes are listed under **Other Parameters**.
    """
    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"),
            footer="")
class CustomBarycentricEcliptic(BaseEclipticFrame):
    """
    Barycentric ecliptic coordinates with custom obliquity.
    The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
    ecliptic tilted a custom obliquity angle.
    The frame attributes are listed under **Other Parameters**.
    """
    # Default is the IAU 1976 mean obliquity of the ecliptic at J2000.
    obliquity = QuantityAttribute(default=84381.448 * u.arcsec, unit=u.arcsec)
| stargaser/astropy | astropy/coordinates/builtin_frames/ecliptic.py | Python | bsd-3-clause | 9,417 |
import LNdigitalIO
def switch_pressed(event):
    """Turn on the output pin that matches the pressed input pin."""
    target = event.chip.output_pins[event.pin_num]
    target.turn_on()
def switch_unpressed(event):
    """Turn off the output pin that matches the released input pin."""
    target = event.chip.output_pins[event.pin_num]
    target.turn_off()
if __name__ == "__main__":
    # Wire every input pin (0-3) to the matching output pin: IODIR_ON events
    # call switch_pressed, IODIR_OFF events call switch_unpressed.
    LNdigital = LNdigitalIO.LNdigitals()
    listener = LNdigitalIO.InputEventListener(chip=LNdigital)
    for i in range(4):
        listener.register(i, LNdigitalIO.IODIR_ON, switch_pressed)
        listener.register(i, LNdigitalIO.IODIR_OFF, switch_unpressed)
    listener.activate()
| LeMaker/LNdigitalIO | examples/presslights.py | Python | gpl-3.0 | 501 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.