| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/runtime
|
third_party/llvm/expand_cmake_vars.py
|
1
|
2647
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
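# Illustrative usage sketch (the variable name LLVM_ENABLE_ZLIB is a hypothetical
# example): values are passed as KEY=VALUE arguments and the template is read
# from stdin, e.g.
#   python expand_cmake_vars.py LLVM_ENABLE_ZLIB=1 < config.h.cmake > config.h
# A "#cmakedefine LLVM_ENABLE_ZLIB" line becomes "#define LLVM_ENABLE_ZLIB",
# an unknown name becomes "/* #undef NAME */", and "#cmakedefine01 NAME"
# expands to "#define NAME <value>", defaulting to 0 when the variable is unset.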
|
apache-2.0
| -6,928,396,880,634,280,000
| 28.741573
| 80
| 0.649037
| false
| 3.567385
| false
| false
| false
|
ericholscher/django
|
django/contrib/admindocs/views.py
|
1
|
15158
|
from importlib import import_module
import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils._os import upath
from django.utils import six
from django.utils.translation import ugettext as _
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
@staff_member_required
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': urlresolvers.reverse('admin:index'),
}, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
admin_root = urlresolvers.reverse('admin:index')
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': "%s://%s%s" % (request.scheme, request.get_host(), admin_root),
}, context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'tags': tags
}, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'filters': filters
}, context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'views': views
}, context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'models': m_list
}, context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.model_name == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst(
(_("the related `%(app_label)s.%(data_type)s` object") % {
'app_label': app_label, 'data_type': data_type,
}),
'model',
_('model:') + data_type,
)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % field.name,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
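# Only document methods whose sole argument is 'self' (callable without
# arguments); the try/except StopIteration below skips any method whose name
# starts with a prefix listed in MODEL_METHODS_EXCLUDE.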
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name': "%s.all" % accessor,
'data_type': 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % accessor,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': '%s.%s' % (opts.app_label, opts.object_name),
# Translators: %s is an object type name
'summary': _("Attributes on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: open(template_file).read() if os.path.exists(template_file) else '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(upath(mod.__file__)))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
template.get_library(library_name)
except template.InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
|
bsd-3-clause
| -5,536,292,160,864,529,000
| 37.866667
| 143
| 0.591701
| false
| 3.981613
| false
| false
| false
|
mlcommons/training
|
translation/tensorflow/process_data.py
|
1
|
15302
|
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess WMT17 ende training and evaluation datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import tarfile
import urllib
import six
import tensorflow as tf
import urllib.request
from mlperf_compliance import mlperf_log
from utils import tokenizer
# Data sources for training/evaluating the transformer translation model.
# If any of the training sources are changed, then either:
# 1) use the flag `--search` to find the best min count or
# 2) update the _TRAIN_DATA_MIN_COUNT constant.
# min_count is the minimum number of times a token must appear in the data
# before it is added to the vocabulary. "Best min count" refers to the value
# that generates a vocabulary set that is closest in size to _TARGET_VOCAB_SIZE.
_TRAIN_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/"
"training-parallel-nc-v12.tgz",
"input": "news-commentary-v12.de-en.en",
"target": "news-commentary-v12.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"input": "commoncrawl.de-en.en",
"target": "commoncrawl.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"input": "europarl-v7.de-en.en",
"target": "europarl-v7.de-en.de",
},
]
# Use pre-defined minimum count to generate subtoken vocabulary.
_TRAIN_DATA_MIN_COUNT = 6
_EVAL_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"input": "newstest2013.en",
"target": "newstest2013.de",
}
]
# Vocabulary constants
_TARGET_VOCAB_SIZE = 32768 # Number of subtokens in the vocabulary list.
_TARGET_THRESHOLD = 327 # Accept vocabulary if size is within this threshold
_VOCAB_FILE = "vocab.ende.%d" % _TARGET_VOCAB_SIZE
# Strings to include in the generated files.
_PREFIX = "wmt32k"
_COMPILE_TAG = "compiled"
_ENCODE_TAG = "encoded"
_TRAIN_TAG = "train"
_EVAL_TAG = "dev" # Following WMT and Tensor2Tensor conventions, in which the
# evaluation datasets are tagged as "dev" for development.
# Number of files to split train and evaluation data
_TRAIN_SHARDS = 100
_EVAL_SHARDS = 1
def find_file(path, filename, max_depth=5):
"""Returns full filepath if the file is in path or a subdirectory."""
for root, dirs, files in os.walk(path):
if filename in files:
return os.path.join(root, filename)
# Don't search past max_depth
depth = root[len(path) + 1:].count(os.sep)
if depth > max_depth:
del dirs[:] # Clear dirs
return None
###############################################################################
# Download and extraction functions
###############################################################################
def get_raw_files(raw_dir, data_source):
"""Return raw files from source. Downloads/extracts if needed.
Args:
raw_dir: string directory to store raw files
data_source: dictionary with
{"url": url of compressed dataset containing input and target files
"input": file with data in input language
"target": file with data in target language}
Returns:
dictionary with
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
"""
raw_files = {
"inputs": [],
"targets": [],
} # keys
for d in data_source:
input_file, target_file = download_and_extract(
raw_dir, d["url"], d["input"], d["target"])
raw_files["inputs"].append(input_file)
raw_files["targets"].append(target_file)
return raw_files
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
def download_from_url(path, url):
"""Download content from a url.
Args:
path: string directory where file will be downloaded
url: string url
Returns:
Full path to downloaded file
"""
filename = url.split("/")[-1]
found_file = find_file(path, filename, max_depth=0)
if found_file is None:
filename = os.path.join(path, filename)
tf.logging.info("Downloading from %s to %s." % (url, filename))
inprogress_filepath = filename + ".incomplete"
inprogress_filepath, _ = urllib.request.urlretrieve(
url, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress.
print()
tf.gfile.Rename(inprogress_filepath, filename)
return filename
else:
tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
return found_file
def download_and_extract(path, url, input_filename, target_filename):
"""Extract files from downloaded compressed archive file.
Args:
path: string directory where the files will be downloaded
url: url containing the compressed input and target files
input_filename: name of file containing data in source language
target_filename: name of file containing data in target language
Returns:
Full paths to extracted input and target files.
Raises:
OSError: if the download/extraction fails.
"""
# Check if extracted files already exist in path
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
tf.logging.info("Already downloaded and extracted %s." % url)
return input_file, target_file
# Download archive file if it doesn't already exist.
compressed_file = download_from_url(path, url)
# Extract compressed files
tf.logging.info("Extracting %s." % compressed_file)
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(path)
# Return filepaths of the requested files.
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
return input_file, target_file
raise OSError("Download/extraction failed for url %s to path %s" %
(url, path))
def txt_line_iterator(path):
"""Iterate through lines of file."""
with tf.gfile.Open(path) as f:
for line in f:
yield line.strip()
def compile_files(data_dir, raw_files, tag):
"""Compile raw files into a single file for each language.
Args:
data_dir: Directory where the compiled files will be saved.
raw_files: Dict containing filenames of input and target data.
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
tag: String to append to the compiled filename.
Returns:
Full path of compiled input and target files.
"""
tf.logging.info("Compiling files with tag %s." % tag)
filename = "%s-%s-%s" % (_PREFIX, _COMPILE_TAG, tag)
input_compiled_file = os.path.join(data_dir, filename + ".lang1")
target_compiled_file = os.path.join(data_dir, filename + ".lang2")
with tf.gfile.Open(input_compiled_file, mode="w") as input_writer:
with tf.gfile.Open(target_compiled_file, mode="w") as target_writer:
for i in range(len(raw_files["inputs"])):
input_file = raw_files["inputs"][i]
target_file = raw_files["targets"][i]
tf.logging.info("Reading files %s and %s." % (input_file, target_file))
write_file(input_writer, input_file)
write_file(target_writer, target_file)
return input_compiled_file, target_compiled_file
def write_file(writer, filename):
"""Write all of lines from file using the writer."""
for line in txt_line_iterator(filename):
writer.write(line)
writer.write("\n")
###############################################################################
# Data preprocessing
###############################################################################
def encode_and_save_files(
subtokenizer, data_dir, raw_files, tag, total_shards):
"""Save data from files as encoded Examples in TFrecord format.
Args:
subtokenizer: Subtokenizer object that will be used to encode the strings.
data_dir: The directory in which to write the examples
raw_files: A tuple of (input, target) data files. Each line in the input and
the corresponding line in target file will be saved in a tf.Example.
tag: String that will be added onto the file names.
total_shards: Number of files to divide the data into.
Returns:
List of all files produced.
"""
# Create a file for each shard.
filepaths = [shard_filename(data_dir, tag, n + 1, total_shards)
for n in range(total_shards)]
if all_exist(filepaths):
tf.logging.info("Files with tag %s already exist." % tag)
return filepaths
tf.logging.info("Saving files with tag %s." % tag)
input_file = raw_files[0]
target_file = raw_files[1]
# Write examples to each shard in round robin order.
tmp_filepaths = [fname + ".incomplete" for fname in filepaths]
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths]
counter, shard = 0, 0
for counter, (input_line, target_line) in enumerate(zip(
txt_line_iterator(input_file), txt_line_iterator(target_file))):
if counter > 0 and counter % 100000 == 0:
tf.logging.info("\tSaving case %d." % counter)
example = dict_to_example(
{"inputs": subtokenizer.encode(input_line, add_eos=True),
"targets": subtokenizer.encode(target_line, add_eos=True)})
writers[shard].write(example.SerializeToString())
shard = (shard + 1) % total_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filepaths, filepaths):
tf.gfile.Rename(tmp_name, final_name)
if tag == _TRAIN_TAG:
mlperf_log.transformer_print(key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES,
value=counter)
elif tag == _EVAL_TAG:
mlperf_log.transformer_print(key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES,
value=counter)
tf.logging.info("Saved %d Examples", counter)
return filepaths
def shard_filename(path, tag, shard_num, total_shards):
"""Create filename for data shard."""
return os.path.join(
path, "%s-%s-%s-%.5d-of-%.5d" %
(_PREFIX, _ENCODE_TAG, tag, shard_num, total_shards))
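# With the constants above, e.g. shard_filename(data_dir, "train", 1, 100)
# produces "<data_dir>/wmt32k-encoded-train-00001-of-00100".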
def shuffle_records(fname):
"""Shuffle records in a single file."""
tf.logging.info("Shuffling records in file %s" % fname)
# Rename file prior to shuffling
tmp_fname = fname + ".unshuffled"
tf.gfile.Rename(fname, tmp_fname)
reader = tf.python_io.tf_record_iterator(tmp_fname)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info("\tRead: %d", len(records))
random.shuffle(records)
# Write shuffled records to original file name
with tf.python_io.TFRecordWriter(fname) as w:
for count, record in enumerate(records):
w.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info("\tWriting record: %d" % count)
tf.gfile.Remove(tmp_fname)
def dict_to_example(dictionary):
"""Converts a dictionary of string->int to a tf.Example."""
features = {}
for k, v in six.iteritems(dictionary):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
return tf.train.Example(features=tf.train.Features(feature=features))
def all_exist(filepaths):
"""Returns true if all files in the list exist."""
for fname in filepaths:
if not tf.gfile.Exists(fname):
return False
return True
def make_dir(path):
if not tf.gfile.Exists(path):
tf.logging.info("Creating directory %s" % path)
tf.gfile.MakeDirs(path)
def main(unused_argv):
"""Obtain training and evaluation data for the Transformer model."""
tf.logging.set_verbosity(tf.logging.INFO)
make_dir(FLAGS.raw_dir)
make_dir(FLAGS.data_dir)
# Get paths of download/extracted training and evaluation files.
tf.logging.info("Step 1/4: Downloading data from source")
train_files = get_raw_files(FLAGS.raw_dir, _TRAIN_DATA_SOURCES)
eval_files = get_raw_files(FLAGS.raw_dir, _EVAL_DATA_SOURCES)
# Create subtokenizer based on the training files.
tf.logging.info("Step 2/4: Creating subtokenizer and building vocabulary")
train_files_flat = train_files["inputs"] + train_files["targets"]
vocab_file = os.path.join(FLAGS.data_dir, _VOCAB_FILE)
subtokenizer = tokenizer.Subtokenizer.init_from_files(
vocab_file, train_files_flat, _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD,
min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT)
tf.logging.info("Step 3/4: Compiling training and evaluation data")
compiled_train_files = compile_files(FLAGS.data_dir, train_files, _TRAIN_TAG)
compiled_eval_files = compile_files(FLAGS.data_dir, eval_files, _EVAL_TAG)
# Tokenize and save data as Examples in the TFRecord format.
tf.logging.info("Step 4/4: Preprocessing and saving data")
mlperf_log.transformer_print(key=mlperf_log.PREPROC_TOKENIZE_TRAINING)
train_tfrecord_files = encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG,
_TRAIN_SHARDS)
mlperf_log.transformer_print(key=mlperf_log.PREPROC_TOKENIZE_EVAL)
encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG,
_EVAL_SHARDS)
mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
for fname in train_tfrecord_files:
shuffle_records(fname)
if __name__ == "__main__":
mlperf_log.ROOT_DIR_TRANSFORMER = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", "-dd", type=str, default="/tmp/translate_ende",
help="[default: %(default)s] Directory for where the "
"translate_ende_wmt32k dataset is saved.",
metavar="<DD>")
parser.add_argument(
"--raw_dir", "-rd", type=str, default="/tmp/translate_ende_raw",
help="[default: %(default)s] Path where the raw data will be downloaded "
"and extracted.",
metavar="<RD>")
parser.add_argument(
"--search", action="store_true",
help="If set, use binary search to find the vocabulary set with size"
"closest to the target size (%d)." % _TARGET_VOCAB_SIZE)
FLAGS, unparsed = parser.parse_known_args()
main(sys.argv)
|
apache-2.0
| 2,412,527,883,679,697,400
| 34.09633
| 80
| 0.668213
| false
| 3.640733
| false
| false
| false
|
imankulov/sentry
|
src/sentry/plugins/sentry_mail/models.py
|
1
|
5759
|
"""
sentry.plugins.sentry_mail.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from sentry.plugins import register
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.utils.cache import cache
from sentry.utils.email import MessageBuilder, group_id_to_email
from sentry.utils.http import absolute_uri
NOTSET = object()
class MailPlugin(NotificationPlugin):
title = 'Mail'
conf_key = 'mail'
slug = 'mail'
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
project_default_enabled = True
project_conf_form = None
subject_prefix = settings.EMAIL_SUBJECT_PREFIX
def _send_mail(self, subject, template=None, html_template=None, body=None,
project=None, group=None, headers=None, context=None):
send_to = self.get_send_to(project)
if not send_to:
return
subject_prefix = self.get_option('subject_prefix', project) or self.subject_prefix
subject_prefix = force_text(subject_prefix)
subject = force_text(subject)
msg = MessageBuilder(
subject='%s%s' % (subject_prefix, subject),
template=template,
html_template=html_template,
body=body,
headers=headers,
context=context,
reference=group,
)
msg.add_users(send_to, project=project)
return msg.send()
def send_test_mail(self, project=None):
self._send_mail(
subject='Test Email',
body='This email was requested as a test of Sentry\'s outgoing email',
project=project,
)
def get_notification_settings_url(self):
return absolute_uri(reverse('sentry-account-settings-notifications'))
def get_project_url(self, project):
return absolute_uri(reverse('sentry-stream', args=[
project.organization.slug,
project.slug,
]))
def should_notify(self, group, event):
send_to = self.get_sendable_users(group.project)
if not send_to:
return False
return super(MailPlugin, self).should_notify(group, event)
def get_send_to(self, project=None):
"""
Returns a list of email addresses for the users that should be notified of alerts.
The logic for this is a bit complicated, but it does the following:
The results of this call can be fairly expensive to calculate, so the send_to list gets cached
for 60 seconds.
"""
if project:
project_id = project.pk
else:
project_id = ''
if not (project and project.team):
return []
conf_key = self.get_conf_key()
cache_key = '%s:send_to:%s' % (conf_key, project_id)
send_to_list = cache.get(cache_key)
if send_to_list is None:
send_to_list = self.get_sendable_users(project)
send_to_list = filter(bool, send_to_list)
cache.set(cache_key, send_to_list, 60) # 1 minute cache
return send_to_list
def notify(self, notification):
event = notification.event
group = event.group
project = group.project
interface_list = []
for interface in event.interfaces.itervalues():
body = interface.to_email_html(event)
if not body:
continue
text_body = interface.to_string(event)
interface_list.append(
(interface.get_title(), mark_safe(body), text_body)
)
subject = group.get_email_subject()
link = group.get_absolute_url()
template = 'sentry/emails/error.txt'
html_template = 'sentry/emails/error.html'
rules = []
for rule in notification.rules:
rule_link = reverse('sentry-edit-project-rule', args=[
group.organization.slug, project.slug, rule.id
])
rules.append((rule.label, rule_link))
context = {
'project_label': project.get_full_name(),
'group': group,
'event': event,
'tags': event.get_tags(),
'link': link,
'interfaces': interface_list,
'rules': rules,
}
headers = {
'X-Sentry-Logger': group.logger,
'X-Sentry-Logger-Level': group.get_level_display(),
'X-Sentry-Team': project.team.name,
'X-Sentry-Project': project.name,
'X-Sentry-Reply-To': group_id_to_email(group.id),
}
self._send_mail(
subject=subject,
template=template,
html_template=html_template,
project=project,
group=group,
headers=headers,
context=context,
)
def notify_digest(self, project, digest):
context = {
'project': project,
'digest': digest,
}
self._send_mail(
subject=render_to_string('sentry/emails/digests/subject.txt', context).rstrip(),
template='sentry/emails/digests/body.txt',
html_template='sentry/emails/digests/body.html',
project=project,
context=context,
)
# Legacy compatibility
MailProcessor = MailPlugin
register(MailPlugin)
|
bsd-3-clause
| 8,034,038,789,124,717,000
| 29.796791
| 102
| 0.592464
| false
| 4.104775
| false
| false
| false
|
psychopy/psychopy
|
psychopy/gui/wxgui.py
|
1
|
15627
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""To build simple dialogues etc. (requires wxPython)
"""
from __future__ import absolute_import, print_function
from builtins import str
from builtins import super
from builtins import range
from psychopy import logging
import wx
import numpy
import os
from psychopy.localization import _translate
from pkg_resources import parse_version
OK = wx.ID_OK
thisVer = parse_version(wx.__version__)
def ensureWxApp():
# make sure there's a wxApp prior to showing a gui, e.g., for expInfo
# dialog
try:
wx.Dialog(None, -1) # not shown; FileDialog gives same exception
return True
except wx._core.PyNoAppError:
if thisVer < parse_version('2.9'):
return wx.PySimpleApp()
elif thisVer >= parse_version('4.0') and thisVer < parse_version('4.1'):
raise Exception(
"wx>=4.0 clashes with pyglet and making it unsafe "
"as a PsychoPy gui helper. Please install PyQt (4 or 5)"
" or wxPython3 instead.")
else:
return wx.App(False)
class Dlg(wx.Dialog):
"""A simple dialogue box. You can add text or input boxes
(sequentially) and then retrieve the values.
see also the function *dlgFromDict* for an **even simpler** version
**Example:** ::
from psychopy import gui
myDlg = gui.Dlg(title="JWP's experiment")
myDlg.addText('Subject info')
myDlg.addField('Name:')
myDlg.addField('Age:', 21)
myDlg.addText('Experiment Info')
myDlg.addField('Grating Ori:',45)
myDlg.addField('Group:', choices=["Test", "Control"])
myDlg.show() # show dialog and wait for OK or Cancel
if myDlg.OK: # then the user pressed OK
thisInfo = myDlg.data
print(thisInfo)
else:
print('user cancelled')
"""
def __init__(self, title=_translate('PsychoPy dialogue'),
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE | wx.DIALOG_NO_PARENT,
labelButtonOK=_translate(" OK "),
labelButtonCancel=_translate(" Cancel ")):
style = style | wx.RESIZE_BORDER
global app # avoid recreating for every gui
if pos is None:
pos = wx.DefaultPosition
app = ensureWxApp()
super().__init__(parent=None, id=-1, title=title, style=style, pos=pos)
self.inputFields = []
self.inputFieldTypes = []
self.inputFieldNames = []
self.data = []
# prepare a frame in which to hold objects
self.sizer = wx.BoxSizer(wx.VERTICAL)
# self.addText('') # insert some space at top of dialogue
self.pos = pos
self.labelButtonOK = labelButtonOK
self.labelButtonCancel = labelButtonCancel
def addText(self, text, color=''):
# the horizontal extent can depend on the locale and font in use:
font = self.GetFont()
dc = wx.WindowDC(self)
dc.SetFont(font)
textWidth, textHeight = dc.GetTextExtent(text)
textLength = wx.Size(textWidth + 50, textHeight)
_style = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL
myTxt = wx.StaticText(self, -1, label=text, style=_style,
size=textLength)
if len(color):
myTxt.SetForegroundColour(color)
self.sizer.Add(myTxt, 1, wx.ALIGN_CENTER)
def addField(self, label='', initial='', color='', choices=None, tip=''):
"""Adds a (labelled) input field to the dialogue box, optional text
color and tooltip. Returns a handle to the field (but not to the
label). If choices is a list or tuple, it will create a dropdown
selector.
"""
self.inputFieldNames.append(label)
if choices:
self.inputFieldTypes.append(str)
else:
self.inputFieldTypes.append(type(initial))
if type(initial) == numpy.ndarray:
initial = initial.tolist() # convert numpy arrays to lists
container = wx.GridSizer(cols=2, vgap=0, hgap=10)
# create label
font = self.GetFont()
dc = wx.WindowDC(self)
dc.SetFont(font)
labelWidth, labelHeight = dc.GetTextExtent(label)
labelLength = wx.Size(labelWidth + 16, labelHeight)
inputLabel = wx.StaticText(self, -1, label,
size=labelLength,
style=wx.ALIGN_RIGHT)
if len(color):
inputLabel.SetForegroundColour(color)
_style = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT
container.Add(inputLabel, 1, _style)
# create input control
if type(initial) == bool:
inputBox = wx.CheckBox(self, -1)
inputBox.SetValue(initial)
elif not choices:
inputWidth, inputHeight = dc.GetTextExtent(str(initial))
inputLength = wx.Size(max(50, inputWidth + 16),
max(25, inputHeight + 8))
inputBox = wx.TextCtrl(self, -1, str(initial),
size=inputLength)
else:
inputBox = wx.Choice(self, -1,
choices=[str(option)
for option in list(choices)])
# Somewhat dirty hack that allows us to treat the choice just like
# an input box when retrieving the data
inputBox.GetValue = inputBox.GetStringSelection
initial = choices.index(initial) if initial in choices else 0
inputBox.SetSelection(initial)
if len(color):
inputBox.SetForegroundColour(color)
if len(tip):
inputBox.SetToolTip(wx.ToolTip(tip))
container.Add(inputBox, 1, wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(container, 1, wx.ALIGN_CENTER)
self.inputFields.append(inputBox) # store this to get data back on OK
return inputBox
def addFixedField(self, label='', value='', tip=''):
"""Adds a field to the dialogue box (like addField) but the
field cannot be edited. e.g. Display experiment version.
tool-tips are disabled (by wx).
"""
thisField = self.addField(label, value, color='Gray', tip=tip)
# wx disables tooltips too; we pass them in anyway
thisField.Disable()
return thisField
def display(self):
"""Presents the dialog and waits for the user to press OK or CANCEL.
If user presses OK button, function returns a list containing the
updated values coming from each of the input fields created.
Otherwise, None is returned.
:return: self.data
"""
# add buttons for OK and Cancel
buttons = wx.BoxSizer(wx.HORIZONTAL)
OK = wx.Button(self, wx.ID_OK, self.labelButtonOK)
OK.SetDefault()
buttons.Add(OK)
CANCEL = wx.Button(self, wx.ID_CANCEL, self.labelButtonCancel)
buttons.Add(CANCEL)
self.sizer.Add(buttons, 1, flag=wx.ALIGN_RIGHT, border=5)
self.SetSizerAndFit(self.sizer)
if self.pos is None:
self.Center()
if self.ShowModal() == wx.ID_OK:
self.data = []
# get data from input fields
for n in range(len(self.inputFields)):
thisName = self.inputFieldNames[n]
thisVal = self.inputFields[n].GetValue()
thisType = self.inputFieldTypes[n]
# try to handle different types of input from strings
logging.debug("%s: %s" % (self.inputFieldNames[n],
str(thisVal)))
if thisType in (tuple, list, float, int):
# probably a tuple or list
exec("self.data.append(" + thisVal + ")") # evaluate it
elif thisType == numpy.ndarray:
exec("self.data.append(numpy.array(" + thisVal + "))")
elif thisType in (str, bool):
self.data.append(thisVal)
else:
logging.warning('unknown type:' + self.inputFieldNames[n])
self.data.append(thisVal)
self.OK = True
else:
self.OK = False
self.Destroy()
if self.OK:
return self.data
def show(self):
"""Presents the dialog and waits for the user to press either
OK or CANCEL.
When they do, dlg.OK will be set to True or False (according to
which button they pressed). If OK==True then dlg.data will be
populated with a list of values coming from each of the input
fields created.
"""
return self.display()
class DlgFromDict(Dlg):
"""Creates a dialogue box that represents a dictionary of values.
Any values changed by the user are changed (in-place) by this
dialogue box.
Parameters
----------
sortKeys : bool
Whether the dictionary keys should be ordered alphabetically
for displaying.
copyDict : bool
If False, modify ``dictionary`` in-place. If True, a copy of
the dictionary is created, and the altered version (after
user interaction) can be retrieved from
:attr:`~psychopy.gui.DlgFromDict.dictionary`.
show : bool
Whether to immediately display the dialog upon instantiation.
If False, it can be displayed at a later time by calling
its `show()` method.
e.g.:
::
info = {'Observer':'jwp', 'GratingOri':45,
'ExpVersion': 1.1, 'Group': ['Test', 'Control']}
infoDlg = gui.DlgFromDict(dictionary=info,
title='TestExperiment', fixed=['ExpVersion'])
if infoDlg.OK:
print(info)
else:
print('User Cancelled')
In the code above, the contents of *info* will be updated to the values
returned by the dialogue box.
If the user cancels (rather than pressing OK),
then the dictionary remains unchanged. If you want to check whether
the user hit OK, then check whether DlgFromDict.OK equals
True or False
See GUI.py for a usage demo, including order and tip (tooltip).
"""
def __init__(self, dictionary, title='', fixed=None, order=None, tip=None,
sortKeys=True, copyDict=False, show=True,
sort_keys=None, copy_dict=None):
# We don't explicitly check for None identity
# for backward-compatibility reasons.
if not fixed:
fixed = []
if not order:
order = []
if not tip:
tip = dict()
# app = ensureWxApp() done by Dlg
super().__init__(title)
if copyDict:
self.dictionary = dictionary.copy()
else:
self.dictionary = dictionary
self._keys = list(self.dictionary.keys())
if sortKeys:
self._keys.sort()
if order:
self._keys = list(order) + list(set(self._keys).difference(set(order)))
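# Keys named in 'order' are shown first; any remaining keys follow
# (the set difference does not preserve the sorted order).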
types = dict()
for field in self._keys:
types[field] = type(self.dictionary[field])
tooltip = ''
if field in tip:
tooltip = tip[field]
if field in fixed:
self.addFixedField(field, self.dictionary[field], tip=tooltip)
elif type(self.dictionary[field]) in [list, tuple]:
self.addField(field, choices=self.dictionary[field],
tip=tooltip)
else:
self.addField(field, self.dictionary[field], tip=tooltip)
if show:
self.show()
def show(self):
"""Display the dialog.
"""
super().show()
if self.OK:
for n, thisKey in enumerate(self._keys):
self.dictionary[thisKey] = self.data[n]
def fileSaveDlg(initFilePath="", initFileName="",
prompt=_translate("Select file to save"),
allowed=None):
"""A simple dialogue allowing write access to the file system.
(Useful in case you collect an hour of data and then try to
save to a non-existent directory!!)
:parameters:
initFilePath: string
default file path on which to open the dialog
initFileName: string
default file name, as suggested file
prompt: string (default "Select file to open")
can be set to custom prompts
allowed: string
A string to specify file filters.
e.g. "BMP files (*.bmp)|*.bmp|GIF files (*.gif)|*.gif"
See http://www.wxpython.org/docs/api/wx.FileDialog-class.html
for further details
If initFilePath or initFileName are empty or invalid then
current path and empty names are used to start search.
If the user cancels, None is returned.
"""
if allowed is None:
allowed = "All files (*.*)|*.*"
# "txt (*.txt)|*.txt"
# "pickled files (*.pickle, *.pkl)|*.pickle"
# "shelved files (*.shelf)|*.shelf"
global app # avoid recreating for every gui
app = ensureWxApp()
dlg = wx.FileDialog(None, prompt, initFilePath,
initFileName, allowed, wx.FD_SAVE)
if dlg.ShowModal() == OK:
# get names of images and their directory
outName = dlg.GetFilename()
outPath = dlg.GetDirectory()
dlg.Destroy()
# tmpApp.Destroy() # this causes an error message for some reason
fullPath = os.path.join(outPath, outName)
else:
fullPath = None
return fullPath
def fileOpenDlg(tryFilePath="",
tryFileName="",
prompt=_translate("Select file(s) to open"),
allowed=None):
"""A simple dialogue allowing read access to the file system.
:parameters:
tryFilePath: string
default file path on which to open the dialog
tryFileName: string
default file name, as suggested file
prompt: string (default "Select file to open")
can be set to custom prompts
allowed: string (available since v1.62.01)
a string to specify file filters.
e.g. "BMP files (*.bmp)|*.bmp|GIF files (*.gif)|*.gif"
See http://www.wxpython.org/docs/api/wx.FileDialog-class.html
for further details
If tryFilePath or tryFileName are empty or invalid then
current path and empty names are used to start search.
If the user cancels, then None is returned.
"""
if allowed is None:
allowed = ("PsychoPy Data (*.psydat)|*.psydat|"
"txt (*.txt,*.dlm,*.csv)|*.txt;*.dlm;*.csv|"
"pickled files (*.pickle, *.pkl)|*.pickle|"
"shelved files (*.shelf)|*.shelf|"
"All files (*.*)|*.*")
global app # avoid recreating for every gui
app = ensureWxApp()
dlg = wx.FileDialog(None, prompt, tryFilePath, tryFileName, allowed,
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE)
if dlg.ShowModal() == OK:
# get names of images and their directory
fullPaths = dlg.GetPaths()
else:
fullPaths = None
dlg.Destroy()
return fullPaths
|
gpl-3.0
| 6,954,707,252,832,250,000
| 35.769412
| 83
| 0.580278
| false
| 4.180578
| false
| false
| false
|
OpenDaisy/daisy-api
|
daisy/gateway.py
|
1
|
12287
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
from daisy.api import authorization
from daisy.api import policy
from daisy.api import property_protections
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import store_utils
import daisy.db
import daisy.domain
import daisy.location
import daisy.notifier
import daisy.quota
try:
import daisy.search
daisy_search = daisy.search
except ImportError:
daisy_search = None
LOG = logging.getLogger(__name__)
class Gateway(object):
def __init__(self, db_api=None, store_api=None, notifier=None,
policy_enforcer=None, es_api=None):
self.db_api = db_api or daisy.db.get_api()
self.store_api = store_api or glance_store
self.store_utils = store_utils
self.notifier = notifier or daisy.notifier.Notifier()
self.policy = policy_enforcer or policy.Enforcer()
if es_api:
self.es_api = es_api
else:
self.es_api = daisy_search.get_api() if daisy_search else None
def get_image_factory(self, context):
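# The base factory is wrapped in proxies from innermost to outermost:
# store -> quota -> policy -> notifier (-> property protection) -> authorization.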
image_factory = daisy.domain.ImageFactory()
store_image_factory = daisy.location.ImageFactoryProxy(
image_factory, context, self.store_api, self.store_utils)
quota_image_factory = daisy.quota.ImageFactoryProxy(
store_image_factory, context, self.db_api, self.store_utils)
policy_image_factory = policy.ImageFactoryProxy(
quota_image_factory, context, self.policy)
notifier_image_factory = daisy.notifier.ImageFactoryProxy(
policy_image_factory, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pif = property_protections.ProtectedImageFactoryProxy(
notifier_image_factory, context, property_rules)
authorized_image_factory = authorization.ImageFactoryProxy(
pif, context)
else:
authorized_image_factory = authorization.ImageFactoryProxy(
notifier_image_factory, context)
return authorized_image_factory
def get_image_member_factory(self, context):
image_factory = daisy.domain.ImageMemberFactory()
quota_image_factory = daisy.quota.ImageMemberFactoryProxy(
image_factory, context, self.db_api, self.store_utils)
policy_member_factory = policy.ImageMemberFactoryProxy(
quota_image_factory, context, self.policy)
authorized_image_factory = authorization.ImageMemberFactoryProxy(
policy_member_factory, context)
return authorized_image_factory
def get_repo(self, context):
image_repo = daisy.db.ImageRepo(context, self.db_api)
store_image_repo = daisy.location.ImageRepoProxy(
image_repo, context, self.store_api, self.store_utils)
quota_image_repo = daisy.quota.ImageRepoProxy(
store_image_repo, context, self.db_api, self.store_utils)
policy_image_repo = policy.ImageRepoProxy(
quota_image_repo, context, self.policy)
notifier_image_repo = daisy.notifier.ImageRepoProxy(
policy_image_repo, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pir = property_protections.ProtectedImageRepoProxy(
notifier_image_repo, context, property_rules)
authorized_image_repo = authorization.ImageRepoProxy(
pir, context)
else:
authorized_image_repo = authorization.ImageRepoProxy(
notifier_image_repo, context)
return authorized_image_repo
def get_task_factory(self, context):
task_factory = daisy.domain.TaskFactory()
policy_task_factory = policy.TaskFactoryProxy(
task_factory, context, self.policy)
notifier_task_factory = daisy.notifier.TaskFactoryProxy(
policy_task_factory, context, self.notifier)
authorized_task_factory = authorization.TaskFactoryProxy(
notifier_task_factory, context)
return authorized_task_factory
def get_task_repo(self, context):
task_repo = daisy.db.TaskRepo(context, self.db_api)
policy_task_repo = policy.TaskRepoProxy(
task_repo, context, self.policy)
notifier_task_repo = daisy.notifier.TaskRepoProxy(
policy_task_repo, context, self.notifier)
authorized_task_repo = authorization.TaskRepoProxy(
notifier_task_repo, context)
return authorized_task_repo
def get_task_stub_repo(self, context):
task_stub_repo = daisy.db.TaskRepo(context, self.db_api)
policy_task_stub_repo = policy.TaskStubRepoProxy(
task_stub_repo, context, self.policy)
notifier_task_stub_repo = daisy.notifier.TaskStubRepoProxy(
policy_task_stub_repo, context, self.notifier)
authorized_task_stub_repo = authorization.TaskStubRepoProxy(
notifier_task_stub_repo, context)
return authorized_task_stub_repo
def get_task_executor_factory(self, context):
task_repo = self.get_task_repo(context)
image_repo = self.get_repo(context)
image_factory = self.get_image_factory(context)
return daisy.domain.TaskExecutorFactory(task_repo,
image_repo,
image_factory)
def get_metadef_namespace_factory(self, context):
ns_factory = daisy.domain.MetadefNamespaceFactory()
policy_ns_factory = policy.MetadefNamespaceFactoryProxy(
ns_factory, context, self.policy)
notifier_ns_factory = daisy.notifier.MetadefNamespaceFactoryProxy(
policy_ns_factory, context, self.notifier)
authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy(
notifier_ns_factory, context)
return authorized_ns_factory
def get_metadef_namespace_repo(self, context):
ns_repo = daisy.db.MetadefNamespaceRepo(context, self.db_api)
policy_ns_repo = policy.MetadefNamespaceRepoProxy(
ns_repo, context, self.policy)
notifier_ns_repo = daisy.notifier.MetadefNamespaceRepoProxy(
policy_ns_repo, context, self.notifier)
authorized_ns_repo = authorization.MetadefNamespaceRepoProxy(
notifier_ns_repo, context)
return authorized_ns_repo
def get_metadef_object_factory(self, context):
object_factory = daisy.domain.MetadefObjectFactory()
policy_object_factory = policy.MetadefObjectFactoryProxy(
object_factory, context, self.policy)
notifier_object_factory = daisy.notifier.MetadefObjectFactoryProxy(
policy_object_factory, context, self.notifier)
authorized_object_factory = authorization.MetadefObjectFactoryProxy(
notifier_object_factory, context)
return authorized_object_factory
def get_metadef_object_repo(self, context):
object_repo = daisy.db.MetadefObjectRepo(context, self.db_api)
policy_object_repo = policy.MetadefObjectRepoProxy(
object_repo, context, self.policy)
notifier_object_repo = daisy.notifier.MetadefObjectRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefObjectRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_resource_type_factory(self, context):
resource_type_factory = daisy.domain.MetadefResourceTypeFactory()
policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy(
resource_type_factory, context, self.policy)
notifier_resource_type_factory = (
daisy.notifier.MetadefResourceTypeFactoryProxy(
policy_resource_type_factory, context, self.notifier)
)
authorized_resource_type_factory = (
authorization.MetadefResourceTypeFactoryProxy(
notifier_resource_type_factory, context)
)
return authorized_resource_type_factory
def get_metadef_resource_type_repo(self, context):
resource_type_repo = daisy.db.MetadefResourceTypeRepo(
context, self.db_api)
policy_object_repo = policy.MetadefResourceTypeRepoProxy(
resource_type_repo, context, self.policy)
notifier_object_repo = daisy.notifier.MetadefResourceTypeRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefResourceTypeRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_property_factory(self, context):
prop_factory = daisy.domain.MetadefPropertyFactory()
policy_prop_factory = policy.MetadefPropertyFactoryProxy(
prop_factory, context, self.policy)
notifier_prop_factory = daisy.notifier.MetadefPropertyFactoryProxy(
policy_prop_factory, context, self.notifier)
authorized_prop_factory = authorization.MetadefPropertyFactoryProxy(
notifier_prop_factory, context)
return authorized_prop_factory
def get_metadef_property_repo(self, context):
prop_repo = daisy.db.MetadefPropertyRepo(context, self.db_api)
policy_prop_repo = policy.MetadefPropertyRepoProxy(
prop_repo, context, self.policy)
notifier_prop_repo = daisy.notifier.MetadefPropertyRepoProxy(
policy_prop_repo, context, self.notifier)
authorized_prop_repo = authorization.MetadefPropertyRepoProxy(
notifier_prop_repo, context)
return authorized_prop_repo
def get_metadef_tag_factory(self, context):
tag_factory = daisy.domain.MetadefTagFactory()
policy_tag_factory = policy.MetadefTagFactoryProxy(
tag_factory, context, self.policy)
notifier_tag_factory = daisy.notifier.MetadefTagFactoryProxy(
policy_tag_factory, context, self.notifier)
authorized_tag_factory = authorization.MetadefTagFactoryProxy(
notifier_tag_factory, context)
return authorized_tag_factory
def get_metadef_tag_repo(self, context):
tag_repo = daisy.db.MetadefTagRepo(context, self.db_api)
policy_tag_repo = policy.MetadefTagRepoProxy(
tag_repo, context, self.policy)
notifier_tag_repo = daisy.notifier.MetadefTagRepoProxy(
policy_tag_repo, context, self.notifier)
authorized_tag_repo = authorization.MetadefTagRepoProxy(
notifier_tag_repo, context)
return authorized_tag_repo
def get_catalog_search_repo(self, context):
if self.es_api is None:
# TODO(mriedem): Make this a separate exception or change to
# warning/error logging in Liberty once we're past string freeze.
# See bug 1441764.
LOG.debug('The search and index services are not available. '
'Ensure you have the necessary prerequisite '
'dependencies installed like elasticsearch to use these '
'services.')
raise exception.ServiceUnavailable()
search_repo = daisy.search.CatalogSearchRepo(context, self.es_api)
policy_search_repo = policy.CatalogSearchRepoProxy(
search_repo, context, self.policy)
return policy_search_repo
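# Illustrative note (not part of the original module): every get_* helper above
# wraps the raw domain object in the same onion of proxies -- policy enforcement
# closest to the database, then notifications, then authorization outermost --
# so callers only ever see the fully wrapped object. Schematically (the gateway
# construction and context below are placeholders):
#
#     gateway = Gateway(db_api=db_api, policy_enforcer=enforcer, notifier=notifier)
#     tag_repo = gateway.get_metadef_tag_repo(request_context)
#     # tag_repo == authorization proxy -> notifier proxy -> policy proxy -> db repo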
|
apache-2.0
| 8,565,885,868,649,443,000
| 45.896947
| 79
| 0.674697
| false
| 4.130084
| false
| false
| false
|
fabioz/mu-repo
|
mu_repo/repos_with_changes.py
|
1
|
2688
|
from mu_repo.action_diff import ParsePorcelain
from mu_repo.execute_parallel_command import ParallelCmd, ExecuteInParallel
#===================================================================================================
# ComputeReposWithChanges
#===================================================================================================
def ComputeReposWithChanges(repos_and_curr_branch, params):
'''
:param repos_and_curr_branch: list(tuple(str, str))
A list with the repos and the current branch for each repo.
:param params: Params
Used to get the git to be used.
:return: dict(str->bool)
A dictionary where the key is the repo and the value a boolean indicating whether
there are local changes in that repo.
'''
commands = []
for repo, _branch in repos_and_curr_branch:
commands.append(ParallelCmd(repo, [params.config.git] + ['status', '-s']))
repos_with_changes = {}
def OnOutput(output):
if not output.stdout:
repos_with_changes[output.repo] = False
else:
repos_with_changes[output.repo] = True
ExecuteInParallel(commands, on_output=OnOutput)
return repos_with_changes
#===================================================================================================
# ComputeReposWithChangesFromCurrentBranchToOrigin
#===================================================================================================
def ComputeReposWithChangesFromCurrentBranchToOrigin(repos_and_curr_branch, params, target_branch=None):
'''
:param repos_and_curr_branch: list(tuple(str, str))
A list with the repos and the current branch for each repo.
:param params: Params
Used to get the git to be used.
:param target_branch: str
If passed, instead of comparing with the same current branch in the origin, it'll compare
with origin/target_branch.
:return: list(str)
Returns a list with the repositories that have some difference from branch to origin/branch.
'''
commands = []
for repo, curr_branch in repos_and_curr_branch:
commands.append(
ParallelCmd(repo, [params.config.git] + ('diff --name-only -z origin/%s' % (
target_branch or curr_branch,)).split()))
repos_with_changes = []
def OnOutput(output):
for _entry in ParsePorcelain(output.stdout, only_split=True):
#Iterate: if we have a match, add it as having a change!
repos_with_changes.append(output.repo)
break
ExecuteInParallel(commands, on_output=OnOutput)
return repos_with_changes
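# Illustrative usage (not part of the original module): both helpers take the
# (repo, current_branch) pairs gathered elsewhere in mu-repo plus a params
# object exposing ``params.config.git``; the names below are placeholders.
#
#     repos_and_curr_branch = [('projectA', 'master'), ('projectB', 'develop')]
#     changed = ComputeReposWithChanges(repos_and_curr_branch, params)
#     dirty = [repo for repo, has_changes in changed.items() if has_changes]
#     differs_from_origin = ComputeReposWithChangesFromCurrentBranchToOrigin(
#         repos_and_curr_branch, params)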
|
gpl-3.0
| 3,322,202,696,225,994,000
| 39.119403
| 104
| 0.56436
| false
| 4.563667
| false
| false
| false
|
biswajit-mandal/contrail-webui-third-party
|
fetch_packages.py
|
1
|
11100
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import errno
import re
import shutil
import subprocess
import sys, getopt
import platform
from time import sleep
_RETRIES = 5
_OPT_VERBOSE = None
_OPT_DRY_RUN = None
_PACKAGE_CACHE='/tmp/cache/' + os.environ['USER'] + '/webui_third_party'
_NODE_MODULES='./node_modules'
_TMP_NODE_MODULES=_PACKAGE_CACHE + '/' + _NODE_MODULES
_TAR_COMMAND = ['tar']
_CACHED_PKG_DISTROS = ('Ubuntu', 'Red Hat', 'CentOS', 'darwin')
from lxml import objectify
def getFilename(pkg, url):
element = pkg.find("local-filename")
if element:
return str(element)
(path, filename) = url.rsplit('/', 1)
m = re.match(r'\w+\?\w+=(.*)', filename)
if m:
filename = m.group(1)
return filename
def setTarCommand():
if isTarGnuVersion():
print 'GNU tar found. we will skip the no-unknown-keyword warning'
global _TAR_COMMAND
_TAR_COMMAND = ['tar', '--warning=no-unknown-keyword']
else:
print 'No GNU tar. will use default tar utility'
def isTarGnuVersion():
cmd = subprocess.Popen(['tar', '--version'],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
(first, _) = output.split('\n', 1)
if first.lower().find('gnu') != -1:
return True
return False
def getTarDestination(tgzfile, compress_flag):
cmd = subprocess.Popen( _TAR_COMMAND + [ '--exclude=.*','-' + compress_flag + 'tf', tgzfile],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
(first, _) = output.split('\n', 1)
fields = first.split('/')
return fields[0]
def getZipDestination(tgzfile):
cmd = subprocess.Popen(['unzip', '-t', tgzfile],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
lines = output.split('\n')
for line in lines:
print line
m = re.search(r'testing:\s+([\w\-\.]+)\/', line)
if m:
return m.group(1)
return None
def getFileDestination(file):
start = file.rfind('/')
if start < 0:
return None
return file[start+1:]
def ApplyPatches(pkg):
stree = pkg.find('patches')
if stree is None:
return
for patch in stree.getchildren():
cmd = ['patch']
if patch.get('strip'):
cmd.append('-p')
cmd.append(patch.get('strip'))
if _OPT_VERBOSE:
print "Patching %s <%s..." % (' '.join(cmd), str(patch))
if not _OPT_DRY_RUN:
fp = open(str(patch), 'r')
proc = subprocess.Popen(cmd, stdin = fp)
proc.communicate()
#def VarSubst(cmdstr, filename):
# return re.sub(r'\${filename}', filename, cmdstr)
def GetOSDistro():
distro = ''
if sys.platform == 'darwin':
return sys.platform
else:
try:
return platform.linux_distribution()[0]
except:
pass
return distro
def DownloadPackage(url, ccfile, pkg):
md5 = pkg.md5
pkg.ccfile = ccfile
if url.find('$distro') != -1:
# Platform specific package download
distro = GetOSDistro()
if distro == '':
md5 = md5.other
# Remove the $distro from the url and try
url = url.replace('/$distro', '')
# Change the pkg format to npm download the dependencies
if pkg.format == 'npm-cached':
pkg.format = 'npm'
else:
# check if we have the distro in our cache
found = False
for cached_pkg in _CACHED_PKG_DISTROS:
if cached_pkg in distro:
distro = cached_pkg
found = True
break
if found == False:
# Remove the $distro from the url and try
url = url.replace('/$distro', '')
# Change the pkg format to npm download the dependencies
md5 = md5.other
if pkg.format == 'npm-cached':
pkg.format = 'npm'
else:
distro = distro.lower().replace(" ", "")
url = url.replace('$distro', distro)
md5 = md5[distro]
pkg.distro = distro
# Change the ccfile, add distro before the package name
idx = ccfile.rfind("/")
pkgCachePath = ccfile[:idx] + "/" + distro
pkg.pkgCachePath = pkgCachePath
pkg.ccfile = pkgCachePath + "/" + ccfile[idx + 1:]
ccfile = pkg.ccfile.text
# Now create the directory
try:
os.makedirs(pkgCachePath)
except OSError:
pass
print url
#Check if the package already exists
if os.path.isfile(ccfile):
md5sum = FindMd5sum(ccfile)
if md5sum == md5:
return pkg
else:
os.remove(ccfile)
retry_count = 0
while True:
subprocess.call(['wget', '--no-check-certificate', '-O', ccfile, url])
md5sum = FindMd5sum(ccfile)
if _OPT_VERBOSE:
print "Calculated md5sum: %s" % md5sum
print "Expected md5sum: %s" % md5
if md5sum == md5:
return pkg
elif retry_count <= _RETRIES:
os.remove(ccfile)
retry_count += 1
sleep(1)
continue
else:
            raise RuntimeError("MD5sum %s, expected (%s) doesn't match for the "
                               "downloaded package %s" % (md5sum, md5, ccfile))
return pkg
def ProcessPackage(pkg):
print "Processing %s ..." % (pkg['name'])
url = str(pkg['url'])
filename = getFilename(pkg, url)
ccfile = _PACKAGE_CACHE + '/' + filename
installArguments = pkg.find('install-arguments')
if pkg.format == 'npm-cached':
try:
shutil.rmtree(str(_NODE_MODULES + '/' + pkg['name']))
except OSError as exc:
pass
try:
os.makedirs(_NODE_MODULES)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print 'mkdirs of ' + _NODE_MODULES + ' failed.. Exiting..'
return
#ccfile = _NODE_MODULES + '/' + filename
pkg = DownloadPackage(url, ccfile, pkg)
#
# Determine the name of the directory created by the package.
# unpack-directory means that we 'cd' to the given directory before
# unpacking.
#
ccfile = pkg.ccfile.text
dest = None
unpackdir = pkg.find('unpack-directory')
if unpackdir:
dest = str(unpackdir)
else:
if pkg.format == 'tgz':
dest = getTarDestination(ccfile, 'z')
elif pkg.format == 'npm-cached':
dest = _NODE_MODULES + '/' + getTarDestination(ccfile, 'z')
elif pkg.format == 'tbz':
dest = getTarDestination(ccfile, 'j')
elif pkg.format == 'zip':
dest = getZipDestination(ccfile)
elif pkg.format == 'npm':
dest = getTarDestination(ccfile, 'z')
elif pkg.format == 'file':
dest = getFileDestination(ccfile)
#
# clean directory before unpacking and applying patches
#
rename = pkg.find('rename')
if rename and pkg.format == 'npm-cached':
rename = _NODE_MODULES + '/' + str(rename)
if rename and os.path.isdir(str(rename)):
if not _OPT_DRY_RUN:
shutil.rmtree(str(rename))
elif dest and os.path.isdir(dest):
if _OPT_VERBOSE:
print "Clean directory %s" % dest
if not _OPT_DRY_RUN:
shutil.rmtree(dest)
if unpackdir:
try:
os.makedirs(str(unpackdir))
except OSError as exc:
pass
cmd = None
if pkg.format == 'tgz':
cmd = _TAR_COMMAND + ['-zxvf', ccfile]
elif pkg.format == 'tbz':
cmd = _TAR_COMMAND + ['-jxvf', ccfile]
elif pkg.format == 'zip':
cmd = ['unzip', '-o', ccfile]
elif pkg.format == 'npm':
newDir = _PACKAGE_CACHE
if 'distro' in pkg:
newDir = newDir + pkg.distro
cmd = ['npm', 'install', ccfile, '--prefix', newDir]
if installArguments:
cmd.append(str(installArguments))
elif pkg.format == 'file':
cmd = ['cp', '-af', ccfile, dest]
elif pkg.format == 'npm-cached':
cmd = _TAR_COMMAND + ['-zxvf', ccfile, '-C', _NODE_MODULES]
else:
print 'Unexpected format: %s' % (pkg.format)
return
print 'Issuing command: %s' % (cmd)
if not _OPT_DRY_RUN:
cd = None
if unpackdir:
cd = str(unpackdir)
if pkg.format == 'npm':
try:
os.makedirs(_NODE_MODULES)
os.makedirs(newDir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print 'mkdirs of ' + _NODE_MODULES + ' ' + newDir + ' failed.. Exiting..'
return
npmCmd = ['cp', '-af', newDir + "/" + _NODE_MODULES + '/' + pkg['name'],
'./node_modules/']
if os.path.exists(newDir + '/' + pkg['name']):
cmd = npmCmd
else:
try:
p = subprocess.Popen(cmd, cwd = cd)
ret = p.wait()
                    if ret != 0:
                        sys.exit('Terminating: ProcessPackage with return code: %d' % ret)
cmd = npmCmd
except OSError:
print ' '.join(cmd) + ' could not be executed, bailing out!'
return
p = subprocess.Popen(cmd, cwd = cd)
ret = p.wait()
        if ret != 0:
            sys.exit('Terminating: ProcessPackage with return code: %d' % ret)
if rename and dest:
os.rename(dest, str(rename))
ApplyPatches(pkg)
def FindMd5sum(anyfile):
if sys.platform == 'darwin':
cmd = ['md5', '-r']
else:
cmd = ['md5sum']
cmd.append(anyfile)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = proc.communicate()
md5sum = stdout.split()[0]
return md5sum
def main(filename):
tree = objectify.parse(filename)
root = tree.getroot()
#Check which version of tar is used and skip warning messages.
setTarCommand()
for object in root.iterchildren():
if object.tag == 'package':
ProcessPackage(object)
if __name__ == '__main__':
try:
opts,args = getopt.getopt(sys.argv[1:],"f:",["file="])
except getopt.GetoptError:
raise RuntimeError("Error in parsing the options/arguments")
xmlfile = None
for opt,arg in opts:
if opt in ("-f","--file"):
xmlfile = arg
os.chdir(os.path.dirname(os.path.realpath(__file__)))
try:
os.makedirs(_PACKAGE_CACHE)
except OSError:
pass
if xmlfile == None:
main('packages.xml')
else:
main(xmlfile)
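# Illustrative usage (not part of the original script): run against the default
# manifest or point the script at another one with -f/--file. Downloads are
# cached under /tmp/cache/$USER/webui_third_party and unpacked per package
# according to its <format> (tgz, tbz, zip, npm, npm-cached or file).
#
#     python fetch_packages.py
#     python fetch_packages.py -f packages.xml
#
# The manifest sketch below only names elements this script actually reads
# (name, url, md5, format, plus optional local-filename, unpack-directory,
# rename, install-arguments and patches); the values are placeholders.
#
#     <package>
#       <name>some-package</name>
#       <url>https://example.org/some-package-1.0.0.tgz</url>
#       <md5>0123456789abcdef0123456789abcdef</md5>
#       <format>tgz</format>
#     </package>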
|
apache-2.0
| -6,112,047,089,367,081,000
| 30.355932
| 97
| 0.53036
| false
| 3.835522
| false
| false
| false
|
jbloomlab/phydms
|
phydmslib/simulate.py
|
1
|
6135
|
"""Functions for performing simulations, mostly using ``pyvolve``.
Written by Jesse Bloom and Sarah Hilton.
"""
import os
import sys
import math
import phydmslib.models
from phydmslib.constants import (NT_TO_INDEX, AA_TO_INDEX, ALMOST_ZERO)
import pyvolve
import numpy
from tempfile import mkstemp
import random
import Bio.Phylo
def pyvolvePartitions(model, divselection=None):
"""Get list of `pyvolve` partitions for `model`.
Args:
`model` (`phydmslib.models.Models` object)
The model used for the simulations. Currently only
certain `Models` are supported (e.g., `YNGKP`,
`ExpCM`)
`divselection` (`None` or 2-tuple `(divomega, divsites)`)
Set this option if you want to simulate a subset of sites
            as under diversifying selection (e.g., with an `omega` different
            from that used by `model`). In this case, `divomega` is
the omega for this subset of sites, and `divsites` is a list
of the sites in 1, 2, ... numbering.
Returns:
`partitions` (`list` of `pyvolve.Partition` objects)
Can be fed into `pyvolve.Evolver` to simulate evolution.
"""
codons = pyvolve.genetics.Genetics().codons
codon_dict = pyvolve.genetics.Genetics().codon_dict
purines = pyvolve.genetics.Genetics().purines
if divselection:
(divomega, divsites) = divselection
else:
divsites = []
assert all((1 <= r <= model.nsites for r in divsites))
partitions = []
for r in range(model.nsites):
matrix = numpy.zeros((len(codons), len(codons)), dtype='float')
for (xi, x) in enumerate(codons):
for (yi, y) in enumerate(codons):
ntdiffs = [(x[j], y[j]) for j in range(3) if x[j] != y[j]]
if len(ntdiffs) == 1:
(xnt, ynt) = ntdiffs[0]
qxy = 1.0
if (xnt in purines) == (ynt in purines):
qxy *= model.kappa
(xaa, yaa) = (codon_dict[x], codon_dict[y])
fxy = 1.0
if xaa != yaa:
if type(model) ==\
phydmslib.models.ExpCM_empirical_phi_divpressure:
fxy *= (model.omega *
(1 + model.omega2 * model.deltar[r]))
elif r + 1 in divsites:
fxy *= divomega
else:
fxy *= model.omega
if type(model) in [phydmslib.models.ExpCM,
phydmslib.models.ExpCM_empirical_phi,
(phydmslib.models
.ExpCM_empirical_phi_divpressure)]:
qxy *= model.phi[NT_TO_INDEX[ynt]]
pix = model.pi[r][AA_TO_INDEX[xaa]]**model.beta
piy = model.pi[r][AA_TO_INDEX[yaa]]**model.beta
if abs(pix - piy) > ALMOST_ZERO:
fxy *= math.log(piy / pix) / (1.0 - pix / piy)
elif type(model) == phydmslib.models.YNGKP_M0:
for p in range(3):
qxy *= model.phi[p][NT_TO_INDEX[y[p]]]
else:
raise ValueError("Can't handle model type {0}".format(
type(model)))
matrix[xi][yi] = model.mu * qxy * fxy
matrix[xi][xi] = -matrix[xi].sum()
# create model in way that captures print statements from pyvolve
old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
try:
m = pyvolve.Model("custom", {"matrix": matrix})
finally:
sys.stdout.close()
sys.stdout = old_stdout
partitions.append(pyvolve.Partition(models=m, size=1))
return partitions
def simulateAlignment(model, treeFile, alignmentPrefix, randomSeed=False):
"""
Simulate an alignment given a model and tree (units = subs/site).
Simulations done using `pyvolve`.
Args:
`model` (`phydmslib.models.Models` object)
The model used for the simulations. Only
models that can be passed to `pyvolve.Partitions`
are supported.
`treeFile` (str)
Name of newick file used to simulate the sequences.
The branch lengths should be in substitutions per site,
which is the default units for all `phydms` outputs.
`alignmentPrefix`
Prefix for the files created by `pyvolve`.
The result of this function is a simulated FASTA alignment
file with the name having the prefix giving by `alignmentPrefix`
and the suffix `'_simulatedalignment.fasta'`.
"""
if randomSeed is False:
pass
else:
random.seed(randomSeed)
# Transform the branch lengths by dividing by the model `branchScale`
tree = Bio.Phylo.read(treeFile, 'newick')
for node in tree.get_terminals() + tree.get_nonterminals():
if (node.branch_length is None) and (node == tree.root):
node.branch_length = 1e-06
else:
node.branch_length /= model.branchScale
fd, temp_path = mkstemp()
Bio.Phylo.write(tree, temp_path, 'newick')
os.close(fd)
pyvolve_tree = pyvolve.read_tree(file=temp_path)
os.remove(temp_path)
# Make the `pyvolve` partition
partitions = pyvolvePartitions(model)
# Simulate the alignment
alignment = '{0}_simulatedalignment.fasta'.format(alignmentPrefix)
info = '_temp_{0}info.txt'.format(alignmentPrefix)
rates = '_temp_{0}_ratefile.txt'.format(alignmentPrefix)
evolver = pyvolve.Evolver(partitions=partitions, tree=pyvolve_tree)
evolver(seqfile=alignment, infofile=info, ratefile=rates)
for f in [rates, info, "custom_matrix_frequencies.txt"]:
if os.path.isfile(f):
os.remove(f)
assert os.path.isfile(alignment)
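# Illustrative usage (not part of the original module): given an already
# constructed model (constructor arguments are not shown here) and a tree whose
# branch lengths are in substitutions per site, a call such as
#
#     simulateAlignment(model, 'tree.newick', 'simulation', randomSeed=1)
#
# writes 'simulation_simulatedalignment.fasta' and removes the temporary
# pyvolve info/rate files.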
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gpl-3.0
| 3,644,956,278,045,778,400
| 37.829114
| 78
| 0.558924
| false
| 3.796411
| false
| false
| false
|
IBT-FMI/SAMRI
|
samri/utilities.py
|
1
|
13239
|
import multiprocessing as mp
import nibabel as nib
import nipype.interfaces.io as nio
import numpy as np
import os
import pandas as pd
from itertools import product
from joblib import Parallel, delayed
from os import path
# PyBIDS 0.6.5 and 0.10.2 compatibility
try:
from bids.grabbids import BIDSLayout
except ModuleNotFoundError:
from bids.layout import BIDSLayout
try:
from bids.grabbids import BIDSValidator
except ModuleNotFoundError:
from bids_validator import BIDSValidator
N_PROCS=max(mp.cpu_count()-2,2)
def bids_autofind_df(bids_dir,
**kwargs
):
"""Automatically generate a BIDS-like Pandas Dataframe index based on the more flexible `samri.utilities.bids_autofind` function.
Parameters
----------
bids_dir : str
Path to BIDS-formatted directory
	typ : {"func", "anat"}
		Which type to source data for (currently only "func" and "anat" are supported; ideally this could be extended to include "dwi").
Returns
-------
path_template : str
String which can be formatted with any of the dictionaries in `substitutions`
	substitutions : list of dict
		A substitution iterator usable as a standard SAMRI function input, which (together with `path_template`) unambiguously identifies input files for analysis.
"""
bids_dir = path.abspath(path.expanduser(bids_dir))
if not os.path.exists(bids_dir):
print('{} path not found'.format(bids_dir))
else: print('{} path found'.format(bids_dir))
path_template, substitutions = bids_autofind(bids_dir, **kwargs)
for i in substitutions:
i['path'] = path_template.format(**i)
df = pd.DataFrame.from_records(substitutions)
return df
def bids_autofind(bids_dir,
typ='',
path_template="sub-{{subject}}/ses-{{session}}/{typ}/sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_run-{{run}}.nii.gz",
match_regex='',
):
"""Automatically generate a BIDS path template and a substitution iterator (list of dicts, as produced by `samri.utilities.bids_substitution_iterator`, and used as a standard input SAMRI function input) from a BIDS-respecting directory.
Parameters
----------
bids_dir : str
Path to BIDS-formatted directory
	typ : {"func", "anat"}
		Which type to source data for (currently only "func" and "anat" are supported; ideally this could be extended to include "dwi").
Returns
-------
path_template : str
String which can be formatted with any of the dictionaries in `substitutions`
	substitutions : list of dict
		A substitution iterator usable as a standard SAMRI function input, which (together with `path_template`) unambiguously identifies input files for analysis.
"""
bids_dir = path.abspath(path.expanduser(bids_dir))
if match_regex:
pass
elif typ in ("func","dwi"):
match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/'+typ+'/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+)\.nii.gz'
elif typ == "":
match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+).*?_run-(?P<run>[0-9]+).*?\.nii.gz'
elif typ == "anat":
match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/anat/.*?_(?P<task>.+).*?_acq-(?P<acquisition>.+)\.nii.gz'
if path_template[:1] != '/' and 'bids_dir' not in path_template:
path_template = '{bids_dir}/'+path_template
path_template = path_template.format(bids_dir=bids_dir, typ=typ)
datafind = nio.DataFinder()
datafind.inputs.root_paths = bids_dir
datafind.inputs.match_regex = match_regex
datafind_res = datafind.run()
substitutions = []
for ix, i in enumerate(datafind_res.outputs.out_paths):
substitution = {}
try:
substitution["acquisition"] = datafind_res.outputs.acquisition[ix]
except AttributeError: pass
try:
substitution["subject"] = datafind_res.outputs.sub[ix]
except AttributeError: pass
try:
substitution["session"] = datafind_res.outputs.ses[ix]
except AttributeError: pass
try:
substitution["task"] = datafind_res.outputs.task[ix]
except AttributeError: pass
try:
substitution["run"] = datafind_res.outputs.run[ix]
except AttributeError: pass
try:
substitution["modality"] = datafind_res.outputs.modality[ix]
except AttributeError: pass
reconstructed_path = path.abspath(path.expanduser(path_template.format(**substitution)))
original_path = path.abspath(path.expanduser(i))
if reconstructed_path != original_path:
print("Original DataFinder path: "+original_path)
print("Reconstructed path: "+reconstructed_path)
raise ValueError("The reconstructed file path based on the substitution dictionary and the path template, is not identical to the corresponding path, found by `nipype.interfaces.io.DataFinder`. See string values above.")
substitutions.append(substitution)
return path_template, substitutions
def bids_substitution_iterator(sessions, subjects,
tasks=[''],
runs=[''],
data_dir='',
preprocessing_dir='',
acquisitions=[''],
modalities=[''],
l1_dir=None,
l1_workdir=None,
preprocessing_workdir=None,
validate_for_template=None,
):
"""Returns a list of dictionaries, which can be used together with a template string to identify large sets of input data files for SAMRI functions.
Parameters
----------
sessions : list
A list of session identifiers to include in the iterator.
subjects : list
A list of subject identifiers to include in the iterator.
	tasks : list, optional
A list of scan types to include in the iterator.
data_dir : str, optional
		Path to the data root (this is where SAMRI creates e.g. `preprocessing`, `l1`, or `l2` directories).
preprocessing_dir : str, optional
String identifying the preprocessing pipeline name from which to provide an iterator.
l1_dir : str, optional
String identifying the level 1 pipeline name from which to provide an iterator. If `None` the level 1 pipeline name is assumed to correspond to the preprocessing pipeline name (`preprocessing_dir`)
l1_workdir : str, optional
String identifying the level 1 work directory name from which to provide an iterator. If `None` the level 1 work directory name is assumed to be the level 1 pipeline name (`l1_dir`) suffixed with the string `"_work"`.
preprocessing_workdir : str, optional
String identifying the preprocessing work directory name from which to provide an iterator. If `None` the preprocessing work directory name is assumed to be the preprocessing pipeline name (`preprocessing_dir`) suffixed with the string `"_work"`.
validate_for_template : str, optional
Template string for which to check whether a file exists.
		If no file exists given a substitution dictionary, that dictionary will not be added to the returned list.
If this variable is an empty string (or otherwise evaluates as False) no check is performed, and all dictionaries (i.e. all input value permutations) are returned.
Returns
-------
list of dictionaries
		With the keys being `"data_dir"`, `"subject"`, `"session"`, `"task"`, `"run"`, `"acquisition"`, and `"modality"`.
"""
substitutions=[]
subjects = list(dict.fromkeys(subjects))
sessions = list(dict.fromkeys(sessions))
tasks = list(dict.fromkeys(tasks))
runs = list(dict.fromkeys(runs))
acquisitions = list(dict.fromkeys(acquisitions))
modalities = list(dict.fromkeys(modalities))
for subject, session, task, run, acquisition, modality in product(subjects, sessions, tasks, runs, acquisitions, modalities):
substitution={}
substitution["data_dir"] = data_dir
substitution["task"] = task
substitution["run"] = run
substitution["session"] = session
substitution["subject"] = subject
substitution["acquisition"] = acquisition
substitution["modality"] = modality
if validate_for_template:
check_file = validate_for_template.format(**substitution)
check_file = path.abspath(path.expanduser(check_file))
if path.isfile(check_file):
substitutions.append(substitution)
else: print('no file under path')
else:
substitutions.append(substitution)
return substitutions
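# Illustrative usage (not part of the original module): every combination of the
# supplied identifiers yields one substitution dict, which can then be applied
# to a path template (the identifiers below are made up).
#
#     substitutions = bids_substitution_iterator(
#         sessions=['ofM', 'ofMaF'], subjects=['4001', '4007'],
#         tasks=['JogB'], data_dir='/home/user/ni_data')
#     template = '{data_dir}/sub-{subject}/ses-{session}/func/' \
#         'sub-{subject}_ses-{session}_task-{task}.nii.gz'
#     paths = [template.format(**s) for s in substitutions]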
def iter_collapse_by_path(in_files, out_files,
n_jobs=None,
n_jobs_percentage=0.75,
		):
	"""Parallelized iteration of `samri.utilities.collapse_by_path`."""
if not n_jobs:
n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
out_files = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(collapse_by_path),
in_files,
out_files,
))
return out_files
def collapse_by_path(in_path, out_path):
"""Wrapper for `samri.utilities.collapse`, supporting an input path and saving object to an output path."""
in_path = os.path.abspath(os.path.expanduser(in_path))
out_path = os.path.abspath(os.path.expanduser(out_path))
img = nib.load(in_path)
img = collapse(img)
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
#race-condition safe:
try:
os.makedirs(out_dir)
except OSError:
pass
nib.save(img, out_path)
return out_path
def collapse(img,
min_dim=3,
):
"""
	Collapse a nibabel image along its last axis.
Parameters
----------
img : nibabel.nifti1.Nifti1Image
Nibabel image to be collapsed.
min_dim : int
		Dimensionality beyond which not to collapse.
"""
ndim = 0
data = img.get_data()
for i in range(len(img.header['dim'])-1):
current_dim = img.header['dim'][i+1]
if current_dim == 1:
break
ndim += 1
if ndim <= min_dim:
return img
img.header['dim'][0] = ndim
img.header['pixdim'][ndim+1:] = 0
data = np.mean(data,axis=(ndim-1))
img = nib.nifti1.Nifti1Image(data, img.affine, img.header)
return img
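# Illustrative usage (not part of the original module): averaging a 4D time
# series into a single 3D volume (the paths are placeholders).
#
#     img = nib.load('/path/to/bold.nii.gz')
#     mean_img = collapse(img)  # mean over the last non-singleton (time) axis
#     nib.save(mean_img, '/path/to/bold_mean.nii.gz')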
def session_irregularity_filter(bids_path, exclude_irregularities):
"""
Create a Pandas Dataframe recording which session-animal combinations should be excluded, based on an irregularity criterion.
Parameters
----------
bids_path: str
Path to the root of the BIDS directory containing `_sessions.tsv` files.
exclude_irregularities: list of str
Irregularity strings which will disqualify a scan.
		The logic for the exclusion is "any": if even one of the irregularities is present, the scan will be disqualified.
"""
bids_path = os.path.abspath(os.path.expanduser(bids_path))
sessions = []
for sub_dir in os.listdir(bids_path):
sub_path = os.path.join(bids_path,sub_dir)
if os.path.isdir(sub_path) and sub_dir[:4] == 'sub-':
session_file = os.path.join(sub_path,'{}_sessions.tsv'.format(sub_dir))
if os.path.isfile(session_file):
_df = pd.read_csv(session_file, sep='\t')
subject = sub_dir[4:]
for ix, row in _df.iterrows():
ses_entry = {}
session = row['session_id'][4:]
irregularities = row['irregularities']
ses_entry['subject'] = subject
ses_entry['session'] = session
ses_entry['irregularities'] = irregularities
try:
ses_entry['exclude'] = any(i in irregularities for i in exclude_irregularities)
except TypeError:
ses_entry['exclude'] = False
sessions.append(ses_entry)
return pd.DataFrame(sessions)
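# Illustrative usage (not part of the original module): build an exclusion table
# from the per-subject `*_sessions.tsv` files (the irregularity strings below
# are placeholders).
#
#     exclusion_df = session_irregularity_filter(
#         '~/ni_data/bids', ['aborted', 'incomplete acquisition'])
#     excluded = exclusion_df[exclusion_df['exclude']][['subject', 'session']]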
def ordered_structures(
atlas='/usr/share/mouse-brain-templates/dsurqec_40micron_labels.nii',
mapping='/usr/share/mouse-brain-templates/dsurqe_labels.csv',
label_columns=['right label','left label'],
structure_column='Structure',
remove_zero_label=True,
):
"""Return a list of structure names corresponding to the ascending order of numerical labels in the atlas image.
Parameters
----------
atlas : str or nibabel.Nifti1Image, optional
Path to a NIfTI atlas file.
mapping : str or pandas.DataFrame, optional
Path to a CSV mapping file containing columns which include the string specified under `structure_column` and the strings specified under `label_columns`.
The latter of these columns need to include the numerical values found in the data matrix of the file whose path is assigned to `atlas`.
label_columns : list, optional
Names of columns in the `mapping` file under which numerical labels are specified.
This can be a length-2 list if separate columns exist for left and right labels; in this case the function will perform the differentiation implicitly.
structure_column : str, optional
The name of the column, which in the `mapping` file records the structure names.
remove_zero_label : bool, optional
		Whether to disregard the zero label in the atlas image.
"""
if isinstance(atlas, str):
atlas = path.abspath(path.expanduser(atlas))
atlas = nib.load(atlas)
if isinstance(mapping, str):
mapping = path.abspath(path.expanduser(mapping))
mapping = pd.read_csv(mapping)
atlas_data = atlas.get_data()
atlas_data_unique = np.unique(atlas_data)
if remove_zero_label:
atlas_data_unique = atlas_data_unique[atlas_data_unique != 0]
structure_names = []
for label in atlas_data_unique:
structure_name = []
for label_column in label_columns:
try:
structure = mapping.loc[mapping[label_column]==label,structure_column].values[0]
except IndexError:
pass
else:
if any(i in label_column for i in ['right','Right','RIGHT']):
lateralized_structure = '{} (R)'.format(structure)
structure_name.append(lateralized_structure)
elif any(i in label_column for i in ['left','Left','LEFT']):
lateralized_structure = '{} (L)'.format(structure)
structure_name.append(lateralized_structure)
else:
structure_name.append(structure)
if len(structure_name) != 1:
structure_name = structure
else:
structure_name = structure_name[0]
structure_names.append(structure_name)
return structure_names
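# Illustrative usage (not part of the original module): with the default atlas
# and mapping shipped by the mouse-brain-templates package, the returned list
# follows the ascending non-zero labels of the atlas image, suffixed '(L)' or
# '(R)' where separate left and right labels exist.
#
#     names = ordered_structures()
#     # names[i] is the structure for the i-th ascending non-zero atlas label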
|
gpl-3.0
| 6,278,964,047,147,906,000
| 36.610795
| 248
| 0.720296
| false
| 3.276169
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/_operations.py
|
1
|
4729
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Storage Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2017_06_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Storage/operations'} # type: ignore
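# Illustrative usage (not part of the generated file): this operation group is
# normally reached through the service client rather than instantiated
# directly; the client construction below is schematic.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.storage import StorageManagementClient
#
#     client = StorageManagementClient(DefaultAzureCredential(), subscription_id)
#     for operation in client.operations.list():
#         print(operation.name)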
|
mit
| -814,979,723,569,684,100
| 42.385321
| 133
| 0.639459
| false
| 4.604674
| false
| false
| false
|
artisanofcode/flask-generic-views
|
docs/conf.py
|
1
|
10315
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Flask-Generic-Views documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 30 04:16:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import ast
import os
import re
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Flask-Generic-Views'
copyright = '2015, Daniel Knell'
author = 'Daniel Knell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('../flask_generic_views/__init__.py', 'rb') as f:
release = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
# The short X.Y version.
version=release.split('-')[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Generic-Viewsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-Generic-Views.tex', 'Flask-Generic-Views Documentation',
'Daniel Knell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-generic-views', 'Flask-Generic-Views Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-Generic-Views', 'Flask-Generic-Views Documentation',
author, 'Flask-Generic-Views', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/dev', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'flask': ('http://flask.pocoo.org/docs/', None),
'jinja': ('http://jinja.pocoo.org/docs/', None),
'sqlalchemy': ('http://www.sqlalchemy.org/docs/', None),
'wtforms': ('https://wtforms.readthedocs.org/en/latest/', None),
'flaskwtf': ('https://flask-wtf.readthedocs.org/en/latest/', None),
'flasksqlalchemy': ('http://flask-sqlalchemy.pocoo.org/', None)}
|
mit
| 8,997,364,476,504,771,000
| 32.49026
| 90
| 0.693747
| false
| 3.709097
| true
| false
| false
|
padraic-padraic/StabilizerSearch
|
stabilizer_search/search/brute_force.py
|
1
|
3184
|
from itertools import combinations
from math import factorial
from six import PY2
from random import shuffle
from ._search import _Search
from ._result import _Result
from ..core.linalg import get_projector, projection_distance, subspace_distance
from ..stabilizers import get_stabilizer_states
from numba import njit
import numpy as np
def ncr(n, r):
return factorial(n)//factorial(r)//factorial(n-r)
def do_brute_force(n_qubits, stabilizer_states, target, distance_func,
chi=None, lower_bound=1, real_only=False):
"""Function which performs the brute force search for stabilizer rank.
Takes a number of qubits and the target state as input, and returns
success: Bool, did the method succeed?
chi: The rank found
basis: the resulting decomposition"""
dims = pow(2, n_qubits)
shuffle(stabilizer_states)
if chi is None:
for i in range(lower_bound, pow(2, n_qubits)):
print('Test with {} states.'.format(i))
for basis in combinations(stabilizer_states, i):
projector = get_projector([b for b in basis])
distance = distance_func(target, projector)
if np.allclose(distance, 0.):
return True, i, basis
return False, dims, None
else:
print('Searching brute force with chi={}'.format(chi))
# print('Got {} combinations to test'.format(ncr(len(stabilizer_states), chi)))
for basis in combinations(stabilizer_states, chi):
projector = get_projector([b for b in basis])
distance = distance_func(target, projector)
if np.allclose(distance, 0.):
return True, chi, basis
return False, chi, None
def brute_force_search(n_qubits, target, **kwargs):
    real_only = kwargs.pop(
        'real_only',
        not np.any(np.imag(target)))
stabilizer_states = get_stabilizer_states(
n_qubits, real_only=real_only)
if target.shape[1] == 1:
distance_func = projection_distance
else:
distance_func = subspace_distance
    return do_brute_force(n_qubits, stabilizer_states, target, distance_func,
                          real_only=real_only, **kwargs)
class BruteForceResult(_Result):
ostring = """
The Brute Force method for the state {target_state} on {n_qubits} qubits
{success}.
We found a decomposition with stabilizer rank {chi}, which looked like:
{decomposition}.
"""
def __init__(self, *args):
args = list(args)
self.basis = args[-1]
args[-1] = self.parse_decomposition(args[-1])
super(BruteForceResult, self).__init__(*args)
    def parse_decomposition(self, decomposition):
        """Additional method for BruteForceResult that takes the decomposition
        and converts it to a string."""
if decomposition is None:
return "Bubkis"
return "\n".join(str(state) for state in decomposition)
class BruteForceSearch(_Search):
Result_Class = BruteForceResult
func = staticmethod(brute_force_search)
def __init__(self, *args, **kwargs):
super(BruteForceSearch, self).__init__(*args, **kwargs)
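# Illustrative usage (not part of the original module): look for a rank-2
# stabilizer decomposition of a normalised single-qubit column vector; the
# target below is a placeholder.
#
#     target = np.array([[1.], [1.]]) / np.sqrt(2.)
#     success, chi, basis = brute_force_search(1, target, chi=2)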
|
gpl-3.0
| -47,993,443,974,661,890
| 34.775281
| 87
| 0.643216
| false
| 3.794994
| false
| false
| false
|
CLVsol/odoo_clvhealth_jcafb
|
project/install.py
|
1
|
6159
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import base
import admin_groups_id
import data_admin_groups_id
import xmlrpclib
import erppeek
def install_update_module(module, update, config_admin=False):
modules_to_update = base.modules_to_update
print '%s%s' % ('--> ', module)
if module in modules_to_update:
new_module = base.install_update_module(module, True)
else:
new_module = base.install_update_module(module, update)
if new_module and config_admin:
method = '%s%s' % ('Administrator_groups_id_', module)
print '%s%s' % ('--> ', method)
methodToCall = getattr(admin_groups_id, method)
result = methodToCall()
method = '%s%s' % ('Data_Administrator_groups_id_', module)
print '%s%s' % ('--> ', method)
methodToCall = getattr(data_admin_groups_id, method)
result = methodToCall()
return new_module
def clvhealth_jcafb_install():
update = base.update
print '--> create_database()'
newDB = base.create_database()
if newDB:
print '--> YourCompany()'
base.YourCompany()
print '--> Administrator()'
base.Administrator()
print '--> Administrator_groups_id_updt()'
base.Administrator_groups_id_updt()
print '--> Demo_User()'
base.Demo_User()
print '--> Data_Administrator_User()'
base.Data_Administrator_User()
else:
client = erppeek.Client(base.server,
db=base.dbname,
user=base.admin_user,
password=base.admin_user_pw,
verbose=False)
proxy = client.model('ir.module.module')
proxy.update_list()
new_module = install_update_module('mail', update)
new_module = install_update_module('hr', update)
new_module = install_update_module('website', update)
new_module = install_update_module('marketing', update)
new_module = install_update_module('survey', update)
new_module = install_update_module('l10n_br_base', update)
new_module = install_update_module('l10n_br_zip', update)
# new_module = install_update_module('l10n_br_data_zip', update)
new_module = install_update_module('clv_base', update, True)
new_module = install_update_module('clv_base_cst', update)
new_module = install_update_module('clv_tag', update, True)
new_module = install_update_module('clv_tag_cst', update)
new_module = install_update_module('clv_annotation', update, True)
new_module = install_update_module('clv_annotation_cst', update)
new_module = install_update_module('clv_document', update, True)
new_module = install_update_module('clv_document_cst', update)
new_module = install_update_module('clv_address', update, True)
new_module = install_update_module('l10n_br_clv_address', update)
new_module = install_update_module('clv_address_cst', update)
new_module = install_update_module('clv_person', update, True)
new_module = install_update_module('l10n_br_clv_person', update)
new_module = install_update_module('clv_person_cst', update)
new_module = install_update_module('clv_family', update, True)
new_module = install_update_module('clv_family_cst', update)
new_module = install_update_module('clv_community', update, True)
new_module = install_update_module('clv_community_cst', update)
new_module = install_update_module('clv_patient', update, True)
new_module = install_update_module('clv_patient_cst', update)
new_module = install_update_module('clv_person_mng', update, True)
new_module = install_update_module('l10n_br_clv_person_mng', update)
new_module = install_update_module('clv_employee', update)
new_module = install_update_module('clv_employee_cst', update)
new_module = install_update_module('jcafb_2016_surveys', update)
new_module = install_update_module('jcafb_2016_consent_forms', update)
# new_module = install_update_module('product', update)
new_module = install_update_module('clv_lab_test', update, True)
new_module = install_update_module('clv_lab_test_cst', update)
# new_module = install_update_module('clv_pointing', update, True)
def secondsToStr(t):
return "%d:%02d:%02d.%03d" % \
reduce(lambda ll, b: divmod(ll[0], b) + ll[1:], [(t*1000,), 1000, 60, 60])
if __name__ == '__main__':
from time import time
base.get_arguments()
start = time()
print '--> Executing clvhealth_jcafb_install.py...'
print '--> Executing clvhealth_jcafb_install()...'
clvhealth_jcafb_install()
print '--> clvhealth_jcafb_install.py'
print '--> Execution time:', secondsToStr(time() - start)
|
agpl-3.0
| 411,191,009,144,921,200
| 34.194286
| 82
| 0.585972
| false
| 3.813622
| false
| false
| false
|
mattstibbs/blockbuster-server
|
blockbuster/messaging/bb_pushover_handler.py
|
1
|
1566
|
import http.client
import urllib
import logging
import redis
from rq import Queue
import blockbuster.config as config
import blockbuster.bb_auditlogger as bb_auditlogger
# Set up RQ queue
conn = redis.from_url(config.REDIS_URL)
q = Queue(connection=conn)
log = logging.getLogger(__name__)
def send_push_notification(a, b, c, d):
q.enqueue(send_push_message, a, b, c, d)
log.debug("Pushover notification queued.")
def send_push_message(user_key, message, message_title, service_number):
try:
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": config.pushover_app_token,
"user": user_key,
"title": message_title,
"message": message,
"url": "sms:" + service_number,
"url_title": "Send SMS to BlockBuster",
"priority": 1
}), {"Content-type": "application/x-www-form-urlencoded"})
log.debug(conn.getresponse())
audit_description = "Key:" + user_key + \
";Title:" + message_title + \
";Message:" + message
bb_auditlogger.BBAuditLoggerFactory().create().logAudit('bgwrk', 'SEND-PUSHOVER', audit_description)
print("Pushover notification sent to " + user_key)
except Exception as e:
log.error("Error sending Pushover notification \n" + str(e))
bb_auditlogger.BBAuditLoggerFactory().create().logException('bgwrk','SEND-PUSHOVER', str(e))
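# Illustrative usage (not part of the original module): queue a notification so
# the RQ worker delivers it in the background; the key, text and number below
# are placeholders.
#
#     send_push_notification('pushover-user-key',
#                            'Your car is blocking someone in.',
#                            'BlockBuster', '+447000000000')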
|
mit
| -340,374,393,596,961,600
| 30.32
| 108
| 0.619413
| false
| 3.65035
| false
| false
| false
|
richardkiss/pycoinnet
|
pycoinnet/examples/address_keeper.py
|
1
|
5205
|
#!/usr/bin/env python
"""
This bitcoin client does little more than try to keep an up-to-date
list of available clients in a text file "addresses".
"""
import asyncio
import binascii
import logging
import random
import time
from pycoinnet.helpers.standards import initial_handshake, version_data_for_peer
from pycoinnet.peer.BitcoinPeerProtocol import BitcoinPeerProtocol
class AddressDB(object):
def __init__(self, path):
self.path = path
self.addresses = self.load_addresses()
self.shuffled = []
def load_addresses(self):
"""
        Return a dictionary mapping (host, port) pairs to their last-seen timestamps.
"""
addresses = {}
try:
with open(self.path) as f:
for l in f:
timestamp, host, port = l[:-1].split("/")
timestamp = int(timestamp)
port = int(port)
addresses[(host, port)] = timestamp
except Exception:
logging.error("can't open %s, using default", self.path)
for h in [
"bitseed.xf2.org", "dnsseed.bluematt.me",
"seed.bitcoin.sipa.be", "dnsseed.bitcoin.dashjr.org"
]:
addresses[(h, 8333)] = 1
return addresses
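    # Illustrative note (not part of the original module): the "addresses" file
    # read above and rewritten by save() below holds one "timestamp/host/port"
    # triple per line, e.g.:
    #
    #     1387487554/bitseed.xf2.org/8333
    #     1387487554/seed.bitcoin.sipa.be/8333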
def next_address(self):
if len(self.shuffled) == 0:
self.shuffled = list(self.addresses.keys())
random.shuffle(self.shuffled)
return self.shuffled.pop()
def remove_address(self, host, port):
key = (host, port)
del self.addresses[key]
def add_address(self, host, port, timestamp):
key = (host, port)
old_timestamp = self.addresses.get(key) or timestamp
self.addresses[key] = max(timestamp, old_timestamp)
def add_addresses(self, addresses):
for timestamp, host, port in addresses:
self.add_address(host, port, timestamp)
def save(self):
if len(self.addresses) < 2:
logging.error("too few addresses: not overwriting")
return
with open(self.path, "w") as f:
for host, port in self.addresses:
f.write(
"%d/%s/%d\n" % (self.addresses[(host, port)], host, port))
class AddressKeeper:
def __init__(self, peer, address_db):
next_message = peer.new_get_next_message_f(lambda name, data: name == 'addr')
def get_msg_addr():
peer.send_msg("getaddr")
name, data = yield from next_message()
date_address_tuples = data["date_address_tuples"]
logging.info("got %s message from %s with %d entries", name, peer, len(date_address_tuples))
address_db.add_addresses(
(timestamp, address.ip_address.exploded, address.port)
for timestamp, address in date_address_tuples)
address_db.save()
# we got addresses from this client. Exit loop and disconnect
peer.transport.close()
self.get_addr_task = asyncio.Task(get_msg_addr())
@asyncio.coroutine
def connect_to_remote(event_loop, magic_header, address_db, connections):
host, port = address_db.next_address()
logging.info("connecting to %s port %d", host, port)
try:
transport, peer = yield from event_loop.create_connection(
lambda: BitcoinPeerProtocol(magic_header),
host=host, port=port)
except Exception:
logging.exception("failed to connect to %s:%d", host, port)
address_db.remove_address(host, port)
address_db.save()
return
try:
logging.info("connected to %s:%d", host, port)
yield from asyncio.wait_for(peer.connection_made_future, timeout=None)
version_parameters = version_data_for_peer(peer)
yield from initial_handshake(peer, version_parameters)
AddressKeeper(peer, address_db)
address_db.add_address(host, port, int(time.time()))
connections.add(peer)
except Exception:
logging.exception("exception talking to %s:%d", host, port)
logging.info("done talking to %s:%d", host, port)
def keep_minimum_connections(event_loop, min_connection_count=4):
connections = set()
address_db = AddressDB("addresses.txt")
magic_header = binascii.unhexlify('F9BEB4D9') # use 0B110907 for testnet3
tasks = set()
while 1:
logging.debug("connection count is %d", len(connections))
difference = min_connection_count - len(connections)
for i in range(difference*2):
f = asyncio.Task(connect_to_remote(
event_loop, magic_header, address_db, connections))
tasks.add(f)
f.add_done_callback(lambda x: tasks.discard(f))
yield from asyncio.sleep(10)
def main():
logging.basicConfig(
level=logging.INFO,
format=('%(asctime)s [%(process)d] [%(levelname)s] '
'%(filename)s:%(lineno)d %(message)s'))
event_loop = asyncio.get_event_loop()
# kmc_task is never used, but if we don't keep a reference, the
# Task is collected (and stops)
kmc_task = asyncio.Task(keep_minimum_connections(event_loop))
event_loop.run_forever()
main()
|
mit
| -1,233,479,060,370,532,000
| 34.408163
| 104
| 0.603266
| false
| 3.841328
| false
| false
| false
|
dzolnierz/mysql-utilities
|
mysql/utilities/common/sql_transform.py
|
1
|
61521
|
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the methods for building SQL statements for definition
differences.
"""
import re
from mysql.utilities.exception import UtilError, UtilDBError
from mysql.connector.conversion import MySQLConverter
_IGNORE_COLUMN = -1 # Ignore column in comparisons and transformations
_FORCE_COLUMN = -2 # Force column to be included in build phase
# Define column control symbols
_DROP_COL, _ADD_COL, _CHANGE_COL_TYPE, _CHANGE_COL_ORDER = range(0, 4)
# List of database objects for enumeration
_DATABASE, _TABLE, _VIEW, _TRIG, _PROC, _FUNC, _EVENT, _GRANT = "DATABASE", \
"TABLE", "VIEW", "TRIGGER", "PROCEDURE", "FUNCTION", "EVENT", "GRANT"
# Define database INFORMATION_SCHEMA column numbers
_DB_NAME, _DB_CHARSET, _DB_COLLATION, _DB_SQL_PATH = range(0, 4)
# Define table INFORMATION_SCHEMA column numbers and index values
_COLUMN_ORDINAL_POSITION, _COLUMN_NAME, _COLUMN_TYPE, _COLUMN_IS_NULLABLE, \
_COLUMN_DEFAULT, _COLUMN_EXTRA, _COLUMN_COMMENT, _COLUMN_KEY = range(0, 8)
_TABLE_DEF, _COLUMN_DEF, _PART_DEF = range(0, 3)
_TABLE_DB, _TABLE_NAME, _TABLE_ENGINE, _TABLE_AUTO_INCREMENT, \
_TABLE_AVG_ROW_LENGTH, _TABLE_CHECKSUM, _TABLE_COLLATION, _TABLE_COMMENT, \
_TABLE_ROW_FORMAT, _TABLE_CREATE_OPTIONS = range(0, 10)
# Define view INFORMATION_SCHEMA column numbers
_VIEW_DB, _VIEW_NAME, _VIEW_BODY, _VIEW_CHECK, _VIEW_DEFINER, \
_VIEW_SECURITY = range(0, 6)
# Define trigger INFORMATION_SCHEMA column numbers
_TRIGGER_DB, _TRIGGER_NAME, _TRIGGER_EVENT, _TRIGGER_TABLE, _TRIGGER_BODY, \
_TRIGGER_TIME, _TRIGGER_DEFINER = range(0, 7)
# Define routine INFORMATION_SCHEMA column numbers
_ROUTINE_DB, _ROUTINE_NAME, _ROUTINE_BODY, _ROUTINE_SQL_DATA_ACCESS, \
_ROUTINE_SECURITY_TYPE, _ROUTINE_COMMENT, _ROUTINE_DEFINER, \
_ROUTINE_PARAMS, _ROUTINE_RETURNS, _ROUTINE_IS_DETERMINISTIC = range(0, 10)
# Define event INFORMATION_SCHEMA column numbers
_EVENT_DB, _EVENT_NAME, _EVENT_DEFINER, _EVENT_BODY, _EVENT_TYPE, \
_EVENT_INTERVAL_FIELD, _EVENT_INTERVAL_VALUE, _EVENT_STATUS, \
_EVENT_ON_COMPLETION, _EVENT_STARTS, _EVENT_ENDS = range(0, 11)
# Get the constraints but ignore primary keys
_CONSTRAINT_QUERY = """
SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_SCHEMA = '%(db)s' AND TABLE_NAME = '%(name)s'
and CONSTRAINT_TYPE != 'PRIMARY KEY'
and CONSTRAINT_TYPE != 'UNIQUE'
"""
def to_sql(obj):
"""Convert a value to a suitable SQL value placing quotes where needed.
obj[in] object (value) to convert
Returns (string) converted value
"""
to_sql.__dict__.setdefault('converter', MySQLConverter())
obj = to_sql.converter.escape(obj) # pylint: disable=E1101
return str(to_sql.converter.quote(obj)) # pylint: disable=E1101
def quote_with_backticks(identifier):
"""Quote the given identifier with backticks, converting backticks (`) in
the identifier name with the correct escape sequence (``).
identifier[in] identifier to quote.
Returns string with the identifier quoted with backticks.
"""
return "`" + identifier.replace("`", "``") + "`"
def quote_with_backticks_definer(definer):
"""Quote the given definer clause with backticks.
This functions quotes the given definer clause with backticks, converting
backticks (`) in the string with the correct escape sequence (``).
definer[in] definer clause to quote.
Returns string with the definer quoted with backticks.
"""
if not definer:
return definer
parts = definer.split('@')
if len(parts) != 2:
return definer
return '@'.join([quote_with_backticks(parts[0]),
quote_with_backticks(parts[1])])
def remove_backtick_quoting(identifier):
"""Remove backtick quoting from the given identifier, reverting the
escape sequence (``) to a backtick (`) in the identifier name.
identifier[in] identifier to remove backtick quotes.
Returns string with the identifier without backtick quotes.
"""
# remove backtick quotes
identifier = identifier[1:-1]
# Revert backtick escape sequence
return identifier.replace("``", "`")
def is_quoted_with_backticks(identifier):
"""Check if the given identifier is quoted with backticks.
identifier[in] identifier to check.
Returns True if the identifier has backtick quotes, and False otherwise.
"""
return identifier[0] == "`" and identifier[-1] == "`"
def convert_special_characters(str_val):
"""Convert especial characters in the string to respective escape sequence.
This method converts special characters in the input string to the
corresponding MySQL escape sequence, according to:
http://dev.mysql.com/doc/en/string-literals.html#character-escape-sequences
str_val[in] string value to be converted.
Returns the input string with all special characters replaced by its
respective escape sequence.
"""
# Check if the input value is a string before performing replacement.
if str_val and isinstance(str_val, basestring):
# First replace backslash '\' character, to avoid replacing '\' in
# further escape sequences. backslash_re matches '\' not followed by % or _,
# since \% and \_ do not need to be replaced, and also matches '\' at the
# end of the string so it is replaced correctly.
backslash_re = r'\\(?=[^%_])|\\\Z'
res = re.sub(backslash_re, r'\\\\', str_val)
# Replace remaining special characters
res = res.replace('\x00', '\\0') # \0
res = res.replace("'", "\\'") # \'
res = res.replace('"', '\\"') # \"
res = res.replace('\b', '\\b') # \b
res = res.replace('\n', '\\n') # \n
res = res.replace('\r', '\\r') # \r
res = res.replace('\t', '\\t') # \t
res = res.replace(chr(26), '\\Z') # \Z
return res
else:
# Not a string, return the input value
return str_val
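# Added illustrative example: convert_special_characters("O'Brien") returns
# the text O\'Brien, i.e. the single quote is replaced by its MySQL escape
# sequence; newlines, tabs, NUL, etc. are handled the same way.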
def build_pkey_where_clause(table, row):
"""Build the WHERE clause based on the primary keys
table[in] instance of Table class for table
row[in] row of data
Returns string - WHERE clause or "" if no keys
"""
where_str = ""
pkeys = table.get_primary_index()
if len(pkeys) > 0:
col_names = table.get_col_names()
where_str += "WHERE "
pkey_cond_lst = []
for pkey in pkeys:
key_col = pkey[0] # get the column name
col_data = row[col_names.index(key_col)] # get column value
# quote key column with backticks
q_key_col = quote_with_backticks(key_col)
pkey_cond_lst.append("{0} = {1}".format(q_key_col,
to_sql(col_data)))
where_str = "{0}{1}".format(where_str, ' AND '.join(pkey_cond_lst))
return where_str
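# Added illustrative example: for a table whose primary key is the single
# column `id` and a row where that column holds 7, the returned clause is
# roughly: WHERE `id` = 7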
def build_set_clauses(table, table_cols, dest_row, src_row):
"""Build the SET clauses for an UPDATE statement
table[in] instance of Table class for table
dest_row[in] row of data for destination (to be changed)
src_row[in] row of data for source (to be changed to)
Returns string - WHERE clause or "" if no keys
"""
table.get_column_metadata()
# do SETs
set_str = ""
do_comma = False
for col_idx in range(0, len(table_cols)):
if dest_row[col_idx] != src_row[col_idx]:
# do comma
if do_comma:
set_str += ", "
else:
set_str = "SET "
do_comma = True
# Check for NULL for non-text fields that have no value in new row
if src_row[col_idx] is None:
set_str += "%s = %s" % (table_cols[col_idx], "NULL")
else:
set_str += "%s = %s" % (table_cols[col_idx],
to_sql(src_row[col_idx]))
return set_str
def transform_data(destination, source, operation, rows):
"""Transform data for tables.
This method will generate INSERT, UPDATE, and DELETE statements for
transforming data found to differ among tables.
destination[in] Table class instance of the destination
source[in] Table class instance of the source
operation[in] specify if INSERT, UPDATE, or DELETE
rows[in] rows for transformation as follows:
UPDATE - tuple (old, new)
DELETE - list to delete
INSERT - list to insert
Returns list - SQL statement(s) for transforming the data or a warning
if the columns differ between the tables
"""
statements = []
# Get column names quoted with backticks
dest_cols = destination.get_col_names(quote_backticks=True)
src_cols = source.get_col_names(quote_backticks=True)
# We cannot do the data changes if the columns are different in the
# destination and source!
if dest_cols != src_cols:
return ["WARNING: Cannot generate SQL UPDATE commands for "
"tables whose definitions are different. Check the "
"table definitions for changes."]
data_op = operation.upper()
if data_op == "INSERT":
for row in rows:
formatted_row = []
for col in row:
formatted_row.append(to_sql(col))
statements.append("INSERT INTO %s (%s) VALUES(%s);" %
(destination.q_table, ', '.join(dest_cols),
', '.join(formatted_row)))
elif data_op == "UPDATE":
for i in range(0, len(rows[0])):
row1 = rows[0][i]
row2 = rows[1][i]
sql_str = "UPDATE %s" % destination.q_table
sql_str += " %s" % build_set_clauses(source, src_cols, row1, row2)
sql_str += " %s" % build_pkey_where_clause(source, row2)
statements.append("%s;" % sql_str)
elif data_op == "DELETE":
for row in rows:
sql_str = "DELETE FROM %s " % destination.q_table
sql_str += build_pkey_where_clause(source, row)
statements.append("%s;" % sql_str)
else:
raise UtilError("Unknown data transformation option: %s." % data_op)
return statements
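# Added illustrative note: the statements produced above have roughly these
# shapes (exact quoting and values come from the Table instance and to_sql()):
#   INSERT INTO `db`.`tbl` (`a`, `b`) VALUES('1', 'x');
#   UPDATE `db`.`tbl` SET `b` = 'y' WHERE `a` = '1';
#   DELETE FROM `db`.`tbl` WHERE `a` = '1';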
class SQLTransformer(object):
"""
The SQLTransformer class provides a mechanism for generating SQL statements
for conforming an object to another for a specific database. For example,
it will generate the ALTER statement(s) for transforming a table definition
as well as the UPDATE statement(s) for transforming a row in the table.
Note: This class is designed to work with the output of the Database class
method get_db_objects with full INFORMATION_SCHEMA columns for the
object definition.
This class contains transformation methods for the objects supported.
Each object's ALTER statement is generated using the following steps.
Note: tables are a bit different due to their many parts but still follow
the general layout.
- a list of dictionaries structure is built to contain the parts of the
statement where each dictionary has fields for format ('fmt') that
contains the string format for building the value, column ('col') for
containing the column number for the value, and value ('val') which
is for holding the value.
- any special formatting, conditionals, etc. concerning the fields is
processed. In some cases this means filling the 'val' for the field.
- the structure values are filled
- the statement is built by concatenating those fields where 'val' is
not empty.
You can tell the fill values phase to ignore filling the value by using
_IGNORE_COLUMN as the column number.
You can tell the build phase to include the field (say after special
processing has filled the value) by using _FORCE_COLUMN as the column
number.
"""
def __init__(self, destination_db, source_db, destination,
source, obj_type, verbosity, options=None):
"""Constructor
destination_db[in] destination Database instance
source_db[in] source Database instance
destination[in] the original object definition or data
source[in] the source object definition or data
obj_type[in] type of object
verbosity[in] verbosity level
options[in] Options dictionary
"""
self.destination_db = destination_db
self.source_db = source_db
self.destination = destination
self.source = source
self.obj_type = obj_type.upper()
self.verbosity = verbosity
self.dest_tbl = None
self.src_tbl = None
if options is None:
options = {}
self.skip_table_opts = options.get("skip_table_opts", False)
def transform_definition(self):
"""Transform an object definition
This method will transform an object definition to match the source
configuration. It returns the appropriate SQL statement(s) to
transform the object or None if no transformation is needed.
Note: the method will throw an exception if the transformation cannot
be completed or there is another error during processing
Returns list - SQL statement(s) for transforming the object
"""
trans_method = {
_DATABASE: self._transform_database,
_TABLE: self._transform_table,
_VIEW: self._transform_view,
_TRIG: self._transform_trigger,
_PROC: self._transform_routine,
_FUNC: self._transform_routine,
_EVENT: self._transform_event,
}
try:
return trans_method[self.obj_type]()
except KeyError:
raise UtilDBError("Unknown object type '%s' for transformation." %
self.obj_type)
def _transform_database(self):
"""Transform a database definition
This method will transform a database definition to match the source
configuration. It returns the ALTER DATABASE SQL statement to
transform the object or None if no transformation is needed.
Returns list - ALTER DATABASE statement for transforming the database
"""
statements = []
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "ALTER DATABASE"},
# object name
{'fmt': " %s", 'col': _IGNORE_COLUMN,
'val': self.destination[_DB_NAME]},
# charset
{'fmt': " CHARACTER SET %s", 'col': _DB_CHARSET, 'val': ""},
# collation
{'fmt': " COLLATE = %s", 'col': _DB_COLLATION, 'val': ""},
]
# if no changes, return None
if not self._fill_values(statement_parts, False):
return None
sql_stmt = "%s;" % self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
@staticmethod
def _convert_option_values(option_values):
"""Convert a list of option=value to a list of names and name, value
pairs.
This method takes a list like the following where each element is a
name=value string:
(a=1, b=3, c=5, d=4)
and turns it into a tuple containing a list of names and a list of
name,value pairs as follows:
((a,b,c,d), ((a,1),(b,3),(c,5),(d,4)))
Value pairs that do not have a value are ignored. For example,
'a=3, b, c=2' will ignore 'b' but return a and c.
option_values[in] list of name=value strings
Returns tuple - (list of names, list of (name, value))
"""
names = []
name_values = []
for value_pair in option_values:
name_value = value_pair.split('=')
# Ignore any value pairs that do not have a value
if len(name_value[0]) > 0:
names.append(name_value[0].upper())
name_values.append(name_value)
return (names, name_values)
@staticmethod
def _find_value(name, name_values):
"""Find a value for a name in a list of tuple (name, value)
name[in] name of pair
name_values[in] list of tuples
Returns string - value at index of match or None
"""
name = name.upper()
for item in name_values:
if item[0].upper() == name:
try:
return item[1]
except IndexError:
return None
return None
def _parse_table_options(self, destination, source):
"""Parse the table options into a list and compare.
This method returns a comma-separated list of table options that
differ from the destination to the source.
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - comma-separated values for table options that differ
or None if options are found in the destination that
are not in the source. These, we do not know how
to remove or turn off without extensive, specialized
code.
"""
from mysql.utilities.common.dbcompare import get_common_lists
# Here we have a comma-separated list of options in the form
# name=value. To determine the inclusion/exclusion lists, we
# must compare on names only so we make a list for each of only
# the names.
dest_opts_names = []
dest_opts = [item.strip() for item in destination.split(',')]
dest_opts_names, dest_opts_val = self._convert_option_values(dest_opts)
dest_opts_names.sort()
src_opts = [item.strip() for item in source.split(',')]
src_opts_names, src_opts_val = self._convert_option_values(src_opts)
src_opts_names.sort()
in_both, in_dest_not_src, in_src_not_dest = \
get_common_lists(dest_opts_names, src_opts_names)
# Whoops! There are things set in the destination that aren't in the
# source so we don't know if these are Ok or if we need to do
# something special.
if len(in_dest_not_src) > 0:
return None
changes = []
# Now check for changes for both
for name in in_both:
dest_val = self._find_value(name, dest_opts_val)
src_val = self._find_value(name, src_opts_val)
if dest_val is not None and dest_val != src_val:
changes.append("%s=%s" % (name.upper(), src_val))
# Get values for those not in destination
for item in in_src_not_dest:
val = self._find_value(item, src_opts_val)
if val is not None:
changes.append("%s=%s" % (item.upper(), val))
return ', '.join(changes)
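# Added illustrative example: with destination options "AUTO_INCREMENT=4" and
# source options "AUTO_INCREMENT=10, CHECKSUM=1" the method returns
# "AUTO_INCREMENT=10, CHECKSUM=1"; if the destination contains an option name
# that the source does not, None is returned instead.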
def _get_table_defns(self, destination, source):
"""Get the transform fpr the general options for a table
This method creates an ALTER TABLE statement for table definitions
that differ. The items covered include only those options described
in the reference manual as table_options and include the following:
engine, auto_increment, avg_row_count, checksum, collation,
comment, and create options
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - ALTER TABLE clause or None if no transform needed
"""
changes = self._check_columns([_TABLE_COMMENT], destination, source)
# build a list of the parts
statement_parts = [
# rename
{'fmt': "RENAME TO %s.%s \n", 'col': _IGNORE_COLUMN, 'val': ""},
# engine
{'fmt': "ENGINE=%s", 'col': _TABLE_ENGINE, 'val': ""},
# auto increment
{'fmt': "AUTO_INCREMENT=%s", 'col': _TABLE_AUTO_INCREMENT,
'val': ""},
# collation
{'fmt': "COLLATE=%s", 'col': _TABLE_COLLATION, 'val': ""},
# comment - always include to ensure comments can be removed
{'fmt': "COMMENT='%s'", 'col': _IGNORE_COLUMN,
'val': source[_TABLE_COMMENT]},
# create options - will be completed later
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': ""},
]
dest_create = destination[_TABLE_CREATE_OPTIONS]
src_create = source[_TABLE_CREATE_OPTIONS]
if dest_create != src_create:
create = statement_parts[5]
opt_val = self._parse_table_options(dest_create, src_create)
if opt_val is None:
return ("# WARNING: the destination table contains options "
"that are not in the source.\n# Cannot generate ALTER "
"statement.")
else:
create['val'] = "%s" % opt_val
changes = True
# if no changes, return None
if not changes and not self._fill_values(statement_parts, False,
destination, source):
return None
# We need to check the comment again and include it if source == ''
if self._check_columns([_TABLE_COMMENT], destination, source) and \
source[_TABLE_COMMENT] == '':
statement_parts[4]['col'] = _FORCE_COLUMN
# Check for rename
if destination[_TABLE_NAME] != source[_TABLE_NAME]:
statement_parts[0]['val'] = (source[_DB_NAME], source[_TABLE_NAME])
# check and set commas
do_comma = False
for part in statement_parts:
if do_comma:
part['fmt'] = ', ' + part['fmt']
elif part['col'] == _FORCE_COLUMN or part['val'] != '':
do_comma = True
return self._build_statement(statement_parts)
@staticmethod
def _get_column_format(col_data):
"""Build the column data type format string
col_data[in] the row containing the column definition
Returns string - column data type format
"""
if col_data is None:
return ""
col_fmt = "%(type)s%(null)s%(default)s%(extra)s%(comment)s"
values = {
'type': col_data[_COLUMN_TYPE],
'null': "",
'default': "",
'extra': "",
'comment': "",
}
if col_data[_COLUMN_IS_NULLABLE].upper() == "NO":
values['null'] = " NOT NULL"
else:
values['null'] = " NULL"
if col_data[_COLUMN_DEFAULT] is not None and \
len(col_data[_COLUMN_DEFAULT]) > 0:
def_val = col_data[_COLUMN_DEFAULT]
# add quotes if needed
if def_val.upper() != "CURRENT_TIMESTAMP":
def_val = to_sql(def_val)
values['default'] = " DEFAULT %s" % def_val
if len(col_data[_COLUMN_EXTRA]) > 0:
if col_data[_COLUMN_EXTRA].upper() != "AUTO_INCREMENT":
values['extra'] = " %s" % col_data[_COLUMN_EXTRA]
if len(col_data[_COLUMN_COMMENT]) > 0:
values['comment'] = " COMMENT '%s'" % col_data[_COLUMN_COMMENT]
return col_fmt % values
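# Added illustrative example: for a column row describing an int(11) column
# that is NOT NULL with default '0' and comment 'user id', the fragment built
# above is roughly: int(11) NOT NULL DEFAULT '0' COMMENT 'user id'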
@staticmethod
def _get_column_position(destination_def, source_def, destination, source,
drop_cols, add_cols):
"""Get the column position in the list
destination_def[in] destination column definition
source_def[in] source column definition
destination[in] destination column definitions
source[in] source column definitions
drop_cols[in] list of columns to be dropped - used to
calculate position of existing columns by
eliminating those cols in destination that will be
dropped
add_cols[in] list of columns to be added - used to
calculate position of existing columns by
eliminating those cols in destination that will be
dropped
Returns string - ' FIRST' or ' AFTER <column>' for column position or "" if
position cannot be determined (add or drop column)
"""
# Converting ordinal position to index positions:
#
# - ordinal positions start counting at 1
# - list indexes start at 0
#
# So if you want to find the column that is one less than the ordinal
# position of the current column, you must subtract 1 then subtract 1
# again to convert it to the list index.
dest_loc_idx = None
src_loc_idx = int(source_def[_COLUMN_ORDINAL_POSITION]) - 1
if destination_def is not None:
dest_loc_idx = int(destination_def[_COLUMN_ORDINAL_POSITION]) - 1
# Check to see if previous column has been dropped. If it has,
# don't include the BEFORE|AFTER - it will be ordered correctly.
if dest_loc_idx is not None and dest_loc_idx - 1 >= 0 and \
destination[dest_loc_idx - 1][_COLUMN_NAME] in drop_cols:
return ""
# Check to see if previous column has been added. If it has,
# don't include the BEFORE|AFTER - it will be ordered correctly.
if (src_loc_idx - 1 >= 0
and source[src_loc_idx - 1][_COLUMN_NAME] in add_cols):
return ""
# compare ordinal position - if not the same find where it goes
if dest_loc_idx is None or dest_loc_idx != src_loc_idx:
if src_loc_idx == 0:
return " FIRST"
for col in source:
if src_loc_idx == int(col[_COLUMN_ORDINAL_POSITION]):
return " AFTER %s" % col[_COLUMN_NAME]
return ""
@staticmethod
def _find_column(name, columns):
"""Find a column in a list by name
name[in] name of the column
columns[in] list of column definitions
Returns - column definition or None if column not found
"""
for col_def in columns:
if name == col_def[_COLUMN_NAME]:
return col_def
return None
def _get_column_change(self, column, destination, source,
drop_cols, add_cols):
"""Identify if column differs and return the changes
column[in] column name and operation type
destination[in] column definitions for destination
source[in] column definitions for source
drop_cols[in] list of columns to be dropped - used to
calculate position of existing columns
add_cols[in] list of columns to be added - used to
calculate position of existing columns
Returns string - new changes for column or ""
"""
operation = column[1]
# Get column from the origins
destination_def = self._find_column(column[0], destination)
source_def = self._find_column(column[0], source)
# Here we look for columns that are set for checking the order but
# the extra data (null, etc.) is different. So we change it to
# a type change instead. Exclude key column in compare.
if operation == _CHANGE_COL_ORDER and \
destination_def[:_COLUMN_KEY] != source_def[:_COLUMN_KEY]:
operation = _CHANGE_COL_TYPE
# Check for drop column
if operation == _DROP_COL:
colstr = " DROP COLUMN %s" % destination_def[_COLUMN_NAME]
else:
# Determine position and get the type format string
col_pos = self._get_column_position(destination_def, source_def,
destination, source,
drop_cols, add_cols)
col_fmt = self._get_column_format(source_def)
# Check for order changes
if operation == _CHANGE_COL_ORDER:
if len(col_pos) > 0:
colstr = " CHANGE COLUMN %s %s %s%s" % \
(source_def[_COLUMN_NAME],
source_def[_COLUMN_NAME],
col_fmt, col_pos)
else:
colstr = "" # No change needed here
# Add or change column
elif operation == _ADD_COL:
colstr = " ADD COLUMN %s %s%s" % (source_def[_COLUMN_NAME],
col_fmt, col_pos)
else: # must be change
colstr = " CHANGE COLUMN %s %s " % \
(destination_def[_COLUMN_NAME],
destination_def[_COLUMN_NAME])
colstr += "%s%s" % (col_fmt, col_pos)
return colstr
def _get_columns(self, destination, source):
"""Get the column definition changes
This method loops through the columns and, where they differ, builds ALTER
statements for transforming the columns of the destination table to the
source table.
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - ALTER statement or None if no column differences.
"""
from mysql.utilities.common.dbcompare import get_common_lists
drop_clauses = []
add_clauses = []
# Build lists with minimal matching data (column name and type) for
# destination and source. Then do the compare. Result is as follows:
#
# - those in both (name, type) will need to be checked for order
# of cols to generate CHANGE COLUMN x x <type> BEFORE|AFTER x
# - those in destination but not source will be dropped unless the
# name appears in source but not destination to generate
# DROP COLUMN x
# - those in destination but not source where the name does appear in
# source is a change of type to generate CHANGE COLUMN x x <type>
# - those in source but not destination that don't match by name in
# destination but not source are new columns to generate
# ADD COLUMN x <type>
# - those columns that match on both name and type need to be
# checked for order changes to generate the
# CHANGE COLUMN x BEFORE|AFTER
# - we need to check those that the column order changes to see
# if they are actually extra col def changes
dest_min = [item[1:3] for item in destination] # name, type
src_min = [item[1:3] for item in source] # name, type
# find matches by name + type
# <both_min>, <dest_src_min>, <src_dest_min> = get_common_lists
(both_min, _, _,) = get_common_lists(dest_min, src_min)
dest_src_names = [item[0] for item in dest_min] # only name
src_dest_names = [item[0] for item in src_min] # only name
# find matches by name only
both_names = [item[0] for item in both_min] # only name
both_check, dest_drop, src_new = get_common_lists(dest_src_names,
src_dest_names)
# find matches by name but not type
both_change_type = list(set(both_check) - set(both_names))
# remove type changes and form list for checking order
both_change_order = list(set(both_names) - set(both_change_type))
column_drops = []
column_changes = [] # a list of tuples in form (col_name, operation)
# Form drops
for col in dest_drop:
column_drops.append((col, _DROP_COL))
# Build the drop statements
for col in column_drops:
change_str = self._get_column_change(col, destination, source,
dest_drop, src_new)
if len(change_str) > 0:
# if first is specified, push to front of list
if change_str.endswith(" FIRST"):
drop_clauses.insert(0, change_str)
else:
drop_clauses.append(change_str)
# Form change type
for col in both_change_type:
column_changes.append((col, _CHANGE_COL_TYPE))
# Form add column
for col in src_new:
column_changes.append((col, _ADD_COL))
# Form change order
for col in both_change_order:
column_changes.append((col, _CHANGE_COL_ORDER))
# Build the add/change statements
for col in column_changes:
change_str = self._get_column_change(col, destination, source,
dest_drop, src_new)
if len(change_str) > 0:
# if first is specified, push to front of list
if change_str.endswith(" FIRST"):
add_clauses.insert(0, change_str)
else:
add_clauses.append(change_str)
return (drop_clauses, add_clauses)
def _get_foreign_keys(self, src_db, src_name, dest_db, dest_name):
"""Get the foreign key constraints
This method returns the table foreign keys via ALTER TABLE clauses
gathered from the Table class methods.
src_db[in] database name for source table
src_name[in] table name for source table
dest_db[in] database name for destination table
dest_name[in] table name for destination table
Returns tuple - (drop, add/changes)
"""
from mysql.utilities.common.table import Table
from mysql.utilities.common.dbcompare import get_common_lists
# Get the Table instances
self.dest_tbl = Table(self.destination_db.source, "%s.%s" %
(dest_db, dest_name))
self.src_tbl = Table(self.source_db.source, "%s.%s" %
(src_db, src_name))
drop_constraints = []
add_constraints = []
# Now we do foreign keys
dest_fkeys = self.dest_tbl.get_tbl_foreign_keys()
src_fkeys = self.src_tbl.get_tbl_foreign_keys()
# Now we determine the foreign keys we need to add and those to drop
# <both_min>, <dest_src_min>, <src_dest_min> = get_common_lists
_, drop_rows, add_rows = get_common_lists(dest_fkeys, src_fkeys)
# Generate DROP foreign key clauses
for fkey in drop_rows:
drop_constraints.append(" DROP FOREIGN KEY %s" % fkey[0])
# if fkey[0] not in drop_idx_recorded:
# constraints.append(" DROP INDEX %s" % fkey[0])
# Generate Add foreign key clauses
clause_fmt = "ADD CONSTRAINT %s FOREIGN KEY(%s) REFERENCES " + \
"`%s`.`%s`(%s)"
for fkey in add_rows:
add_constraints.append(clause_fmt % fkey)
return (drop_constraints, add_constraints)
@staticmethod
def _get_index_sql_clauses(rows):
"""Return the ALTER TABLE index clauses for the table.
This method returns the SQL index clauses for use in ALTER or CREATE
TABLE commands for defining the indexes for the table.
rows[in] result set of index definitions
Returns list - list of SQL index clause statements or
[] if no indexes
"""
index_clauses = []
if rows != []:
pri_key_cols = []
unique_indexes = []
unique_key_cols = []
unique_name = None
unique_method = None
unique_setting = None
for key in rows:
if key[2] == 'PRIMARY':
q_key = quote_with_backticks(key[4])
pri_key_cols.append(q_key)
else:
if unique_name is None:
unique_name = key[2]
unique_method = key[10]
unique_setting = key[1]
unique_key_cols.append(key[4])
elif unique_name == key[2]:
unique_key_cols.append(key[4])
else:
unique_indexes.append((unique_name, unique_method,
unique_setting,
unique_key_cols))
unique_key_cols = []
unique_name = key[2]
unique_method = key[10]
unique_setting = key[1]
unique_key_cols.append(key[4])
# add the last one
if unique_name is not None:
unique_indexes.append((unique_name, unique_method,
unique_setting,
unique_key_cols))
# Build SQL statement clause
if len(pri_key_cols) > 0:
index_clauses.append(" ADD PRIMARY KEY(%s)" %
','.join(pri_key_cols))
if len(unique_indexes) > 0:
for idx in unique_indexes:
create_idx = " ADD "
if int(idx[2]) != 1:
create_idx += "UNIQUE "
if idx[1] == "FULLTEXT":
create_idx += "FULLTEXT "
if (idx[1] == "RTREE"):
using = " USING %s" % (idx[1])
else:
using = ""
create_idx += "INDEX %s%s (%s)" % \
(idx[0], using,
','.join(idx[3]))
index_clauses.append(create_idx)
return index_clauses
def _get_indexes(self, src_db, src_name, dest_db, dest_name):
"""Get the index constraints
This method returns the table primary keys, and other indexes via
ALTER TABLE clauses gathered from the Table class methods.
src_db[in] database name for source table
src_name[in] table name for source table
dest_db[in] database name for destination table
dest_name[in] table name for destination table
Returns tuple - (drop, add/changes)
"""
from mysql.utilities.common.table import Table
from mysql.utilities.common.dbcompare import get_common_lists
# Get the Table instances
self.dest_tbl = Table(self.destination_db.source, "%s.%s" %
(dest_db, dest_name))
self.src_tbl = Table(self.source_db.source, "%s.%s" %
(src_db, src_name))
drop_indexes = []
add_indexes = []
# Get the list of indexes
# Do not compare with the name of the tables
dest_idx = [('',) + tuple(idx[1:])
for idx in self.dest_tbl.get_tbl_indexes()]
src_idx = [('',) + tuple(idx[1:])
for idx in self.src_tbl.get_tbl_indexes()]
# Now we determine the indexes we need to add and those to drop
_, drop_idx, add_idx = get_common_lists(dest_idx, src_idx)
if not drop_idx and not add_idx:
return ([], [])
# Generate DROP index clauses
drop_idx_recorded = [] # used to avoid duplicate index drops
for index in drop_idx:
if index[2] == "PRIMARY":
drop_indexes.append(" DROP PRIMARY KEY")
elif index[2] not in drop_idx_recorded:
drop_indexes.append(" DROP INDEX %s" % index[2])
drop_idx_recorded.append(index[2])
# Generate ADD index clauses
if len(add_idx) > 0:
add_indexes.extend(self._get_index_sql_clauses(add_idx))
return (drop_indexes, add_indexes)
@staticmethod
def _check_for_partitions(destination_row, source_row):
"""Determine if there are transformations involving partitions
This method returns TRUE if the destination and source differ in
partitioning configurations
destination_row[in] the original object definition or data
source_row[in] the source object definition or data
Returns bool - True = differences found, False = no differences
"""
#
# TODO: Complete this operation with a new worklog.
# This release does not support transformation of partitions.
part_changes_found = False
if len(destination_row) != len(source_row):
part_changes_found = True
elif len(destination_row) == 0:
return None
elif len(destination_row) == 1:
if not (destination_row[0][3] is None
and source_row[0][3] is None):
part_changes_found = True
else:
part_stop = len(destination_row)
row_stop = len(destination_row[0])
for i in range(0, part_stop):
for j in range(0, row_stop):
if destination_row[i][j] != source_row[i][j]:
part_changes_found = True
break
return part_changes_found
def _transform_table(self):
"""Transform a table definition
This method will transform a table definition to match the source
configuration. It returns the ALTER TABLE SQL statement to
transform the object or None if no transformation is needed.
Note: The incoming lists contain a tuple defined as:
(table definitions, columns, partitions, constraints)
for destination and source.
Returns list - ALTER TABLE statements for transforming the table
"""
statements = []
# Collect a list of all of the ALTER clauses. Order is important in
# building an ALTER TABLE statement. For safety (and correct execution)
# we must order the clauses as follows:
#
# - drop foreign key constraints
# - drop indexes
# - drop columns
# - add/change columns
# - add/change indexes
# - add/change foreign keys
# - general table changes
#
# Note: partition changes not supported by this release
src_db_name = self.source[_TABLE_DEF][_TABLE_DB]
src_tbl_name = self.source[_TABLE_DEF][_TABLE_NAME]
dest_db_name = self.destination[_TABLE_DEF][_TABLE_DB]
dest_tbl_name = self.destination[_TABLE_DEF][_TABLE_NAME]
# Quote identifiers with backticks
q_src_db_name = quote_with_backticks(src_db_name)
q_src_tbl_name = quote_with_backticks(src_tbl_name)
q_dest_db_name = quote_with_backticks(dest_db_name)
q_dest_tbl_name = quote_with_backticks(dest_tbl_name)
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "ALTER TABLE"},
# object name
{'fmt': " %s.%s", 'col': _IGNORE_COLUMN,
'val': (q_dest_db_name, q_dest_tbl_name)},
# alter clauses - will be completed later
{'fmt': " \n%s", 'col': _IGNORE_COLUMN, 'val': ""},
]
# For foreign key changes, we need two collections: drop statements,
# add and change statements. Method returns tuple of (drop, add).
fkeys = self._get_foreign_keys(q_src_db_name, q_src_tbl_name,
q_dest_db_name, q_dest_tbl_name)
# For index changes, we need two collections: drop statements, add and
# change statements. Method returns tuple of (drop, add).
indexes = self._get_indexes(q_src_db_name, q_src_tbl_name,
q_dest_db_name, q_dest_tbl_name)
# For column changes, we need two collections: drop statements, add and
# change statements. Method returns tuple of (drop, add/change).
columns = self._get_columns(self.destination[_COLUMN_DEF],
self.source[_COLUMN_DEF])
# Now add drops then add/changes
for i in range(0, 2):
statements.extend(fkeys[i])
statements.extend(indexes[i])
statements.extend(columns[i])
# General definition returns a single string of the option changes
if not self.skip_table_opts:
gen_defn = self._get_table_defns(self.destination[_TABLE_DEF],
self.source[_TABLE_DEF])
else:
gen_defn = None
if gen_defn is not None:
statements.append(gen_defn)
# Form the SQL command.
statement_parts[2]['val'] = ', \n'.join(statements)
sql_stmts = ["%s;" % self._build_statement(statement_parts)]
# Currently, we check partitions last because this code will
# generate a warning message. Later, once this code is complete,
# it can be moved where it belongs in the order of creation of
# the ALTER TABLE statement
if self._check_for_partitions(self.destination[_PART_DEF],
self.source[_PART_DEF]):
sql_stmts.append("# WARNING: Partition transformation is not "
"supported in this release.\n# Please check "
"the table definitions for partition changes.")
return sql_stmts
def _transform_view(self):
"""Transform a view definition
This method will transform a view definition to match the source
configuration. It returns the CREATE OR ALTER VIEW SQL statement to
transform the object or None if no transformation is needed.
Returns list - ALTER VIEW statement for transforming the view
"""
statements = []
# check for create
do_create = self._check_columns([_VIEW_CHECK])
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN,
'val': "CREATE" if do_create else "ALTER"},
# definer
{'fmt': " DEFINER=%s", 'col': _VIEW_DEFINER, 'val': ""},
# security
{'fmt': " SQL SECURITY %s", 'col': _VIEW_SECURITY, 'val': ""},
# object type and name
{'fmt': " VIEW %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.destination[_VIEW_DB],
self.destination[_VIEW_NAME])},
# definition
{'fmt': " AS \n %s", 'col': _VIEW_BODY, 'val': ""},
# check option (will be updated later)
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': ""}
]
changes = False
# view check option is special - we have to handle that separately
if self.destination[_VIEW_CHECK] != self.source[_VIEW_CHECK]:
if self.source[_VIEW_CHECK].upper() != 'NONE':
check = statement_parts[5]
check['val'] = " WITH %s CHECK OPTION" % \
self.source[_VIEW_CHECK]
changes = True
# if no changes, return None
if not changes and not self._fill_values(statement_parts, do_create):
return None
# check to see if definer or security or check option have changed and
# if so add definition (always needed if these change)
if self._check_columns([_VIEW_DEFINER, _VIEW_SECURITY, _VIEW_CHECK]):
statement_parts[4]['val'] = self.source[_VIEW_BODY]
# form the drop if we do a create
if do_create:
statements.append("DROP VIEW IF EXISTS `%s`.`%s`;" %
(self.destination[_VIEW_DB],
self.destination[_VIEW_NAME]))
sql_stmt = "%s;" % self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
def _transform_trigger(self):
"""Transform a trigger definition
This method will transform a trigger definition to match the source
configuration. It returns the appropriate SQL statement(s) to
transform the object or None if no transformation is needed.
Returns list - SQL statement(s) for transforming the trigger
"""
statements = []
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "CREATE"},
# definer
{'fmt': " DEFINER=%s", 'col': _TRIGGER_DEFINER, 'val': ""},
# object name
{'fmt': " TRIGGER %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.destination[_TRIGGER_DB],
self.destination[_TRIGGER_NAME])},
# trigger timing
{'fmt': " %s", 'col': _TRIGGER_TIME, 'val': ""},
# trigger event
{'fmt': " %s", 'col': _TRIGGER_EVENT, 'val': ""},
# trigger table
{'fmt': " ON %s." % self.destination[_TRIGGER_DB] +
"%s FOR EACH ROW",
'col': _TRIGGER_TABLE, 'val': ""},
# trigger body
{'fmt': " %s;", 'col': _TRIGGER_BODY, 'val': ""},
]
# Triggers don't have ALTER SQL so we just pass back a drop + create.
# if no changes, return None
if not self._fill_values(statement_parts, True):
return None
statements.append("DROP TRIGGER IF EXISTS `%s`.`%s`;" %
(self.destination[_TRIGGER_DB],
self.destination[_TRIGGER_NAME]))
sql_stmt = self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
def _transform_routine(self):
"""Transform a routine definition
This method will transform a routine (FUNCTION or PROCEDURE) definition
to match the source configuration. It returns the ALTER [FUNCTION |
PROCEDURE] SQL statement to transform the object or None if no
transformation is needed.
Returns list - [CREATE|ALTER] [FUNCTION|PROCEDURE] statement for
transforming the routine
"""
statements = []
# check for create
do_create = self._check_columns([_ROUTINE_BODY,
_ROUTINE_DEFINER,
_ROUTINE_PARAMS])
# Quote destination db and routine names with backticks
q_dest_db = quote_with_backticks(self.destination[_ROUTINE_DB])
q_dest_routine = quote_with_backticks(self.destination[_ROUTINE_NAME])
# build a list of the parts
statement_parts = [
# delimiter
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "DELIMITER //\n"},
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN,
'val': "CREATE" if do_create else "ALTER"},
# definer
{'fmt': " DEFINER=%s", 'col': _ROUTINE_DEFINER,
'val': ""},
# object type and name
{'fmt': " %s %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.obj_type.upper(), q_dest_db, q_dest_routine)},
# parameters
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': ""},
# returns (Functions only)
{'fmt': " RETURNS %s", 'col': _IGNORE_COLUMN, 'val': ""},
# access method
{'fmt': " %s", 'col': _ROUTINE_SQL_DATA_ACCESS, 'val': ""},
# deterministic (Functions only)
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': ""},
# security
{'fmt': " SQL SECURITY %s", 'col': _ROUTINE_SECURITY_TYPE,
'val': ""},
# comment
{'fmt': " COMMENT '%s'", 'col': _ROUTINE_COMMENT, 'val': ""},
# body
{'fmt': " %s", 'col': _ROUTINE_BODY, 'val': ""},
# reset delimiter
{'fmt': "%s", 'col': _IGNORE_COLUMN,
'val': "//\nDELIMITER ;\n"},
]
# if no changes, return None
if not self._fill_values(statement_parts, do_create):
return None
# Add parameters and DEFINER if CREATE statement.
if do_create:
statement_parts[4]['val'] = \
'({0})'.format(self.source[_ROUTINE_PARAMS])
# Quote DEFINER with backticks
statement_parts[2]['val'] = \
quote_with_backticks_definer(self.source[_ROUTINE_DEFINER])
# Add the returns for functions
# Only when doing create or modifications to the body
if self.obj_type.upper() == "FUNCTION":
if (do_create or
self.destination[_ROUTINE_BODY] != self.source[_ROUTINE_BODY]):
statement_parts[5]['val'] = self.source[_ROUTINE_RETURNS]
# Add deterministic
if do_create:
if self.source[_ROUTINE_IS_DETERMINISTIC] == "YES":
statement_parts[7]['val'] = "DETERMINISTIC"
else:
statement_parts[7]['val'] = "NOT DETERMINISTIC"
# form the drop if we do a create
if do_create:
statements.append(
"DROP {0} IF EXISTS {1}.{2};".format(
self.obj_type.upper(), q_dest_db, q_dest_routine
)
)
statements.append(self._build_statement(statement_parts))
return statements
def _transform_event(self):
"""Transform a event definition
This method will transform a event definition to match the source
configuration. It returns the ALTER EVENT SQL statement to
transform the object or None if no transformation is needed.
Notes:
The DEFINER does not compare properly for SHOW CREATE EVENT
comparison.
The RENAME cannot be processed because it requires a different
name and mysqldiff compares on like names.
Returns list - ALTER EVENT statement for transforming the event
"""
statements = []
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "ALTER"},
# definer
{'fmt': " DEFINER=%s", 'col': _EVENT_DEFINER, 'val': ""},
# type
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': "EVENT"},
# object name
{'fmt': " %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.destination[_EVENT_DB],
self.destination[_EVENT_NAME])},
# schedule - will be filled in later
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': ""},
# complete
{'fmt': " ON COMPLETION %s", 'col': _EVENT_ON_COMPLETION,
'val': ""},
# rename
{'fmt': " RENAME TO %s", 'col': _EVENT_NAME, 'val': ""},
# status
{'fmt': " %s", 'col': _EVENT_STATUS,
'val': self.source[_EVENT_STATUS]},
# event body
{'fmt': " DO %s", 'col': _EVENT_BODY, 'val': ""},
]
# We can only do the columns we know about and must ignore the others
# like STARTS which may be Ok to differ.
changes = self._check_columns([_EVENT_ON_COMPLETION, _EVENT_STATUS,
_EVENT_BODY, _EVENT_NAME, _EVENT_ENDS,
_EVENT_INTERVAL_FIELD, _EVENT_STARTS,
_EVENT_INTERVAL_VALUE, _EVENT_TYPE])
# We do the schedule separately because requires additional checks
if changes:
schedule = statement_parts[4]
schedule['val'] = "ON SCHEDULE"
if self.source[_EVENT_TYPE].upper() == "RECURRING":
schedule['val'] += " EVERY %s" % \
self.source[_EVENT_INTERVAL_VALUE]
schedule['val'] += " %s" % \
self.source[_EVENT_INTERVAL_FIELD].upper()
if self.source[_EVENT_STARTS] is not None:
schedule['val'] += " STARTS '%s'" % self.source[_EVENT_STARTS]
if self.source[_EVENT_ENDS] is not None:
schedule['val'] += " ENDS '%s'" % self.source[_EVENT_ENDS]
# if no changes, return None
if not changes:
return None
self._fill_values(statement_parts, False)
# We must fix the status value
status = statement_parts[7]
if status['val'].upper() == "DISABLED":
status['val'] = "DISABLE"
elif status['val'].upper() == "ENABLED":
status['val'] = "ENABLE"
elif status['val'].upper() == "SLAVESIDE_DISABLED":
status['val'] = "DISABLE ON SLAVE"
sql_stmt = "%s;" % self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
def _check_columns(self, col_list, destination=None, source=None):
"""Check for special column changes to trigger a CREATE
This method checks a specific list of columns to see if the values
differ from the destination and source. If they do, the method returns
True else it returns False.
col_list[in] a list of column numbers to check
destination[in] If not None, use this list for destination
(default = None)
source[in] If not None, use this list for source
(default = None)
Returns bool - True = there are differences, False = no differences
"""
if destination is None:
destination = self.destination
if source is None:
source = self.source
for column_num in col_list:
if destination[column_num] != source[column_num]:
return True
return False
def _fill_values(self, stmt_parts, create=False,
destination=None, source=None):
"""Fill the structure with values
This method loops through all of the column dictionaries filling in
the value for any that differ from the destination to the source. If
create is True, it will also fill in the values from the source to
permit the completion of a CREATE statement.
stmt_parts[in] a list of column dictionaries
create[in] if True, fill in all values
if False, fill in only those values that differ
(default = False)
destination[in] If not None, use this list for destination
(default = None)
source[in] If not None, use this list for source
(default = None)
Returns bool - True if changes found
"""
if destination is None:
destination = self.destination
if source is None:
source = self.source
changes_found = False
for part in stmt_parts:
col = part['col']
if col != _IGNORE_COLUMN:
if source[col] is not None and destination[col] != source[col]:
part['val'] = source[col]
changes_found = True
elif create:
part['val'] = destination[col]
return changes_found
@staticmethod
def _build_statement(stmt_parts):
"""Build the object definition statement
This method will build a completed statement based on the list of parts
provided.
stmt_parts[in] a list of column dictionaries
create[in] if True, fill in all values
if False, fill in only those values that differ
(default = False)
Returns string - the object definition string
"""
stmt_values = []
for part in stmt_parts:
if part['col'] == _FORCE_COLUMN or part['val'] != "":
stmt_values.append(part['fmt'] % part['val'])
return ''.join(stmt_values)
|
gpl-2.0
| -7,321,135,944,833,890,000
| 39.288802
| 79
| 0.560101
| false
| 4.275855
| false
| false
| false
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-annotationfilter/package.py
|
1
|
1981
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAnnotationfilter(RPackage):
"""This package provides class and other infrastructure to implement
filters for manipulating Bioconductor annotation resources. The
filters will be used by ensembldb, Organism.dplyr, and other
packages."""
homepage = "https://bioconductor.org/packages/AnnotationFilter/"
url = "https://git.bioconductor.org/packages/AnnotationFilter"
list_url = homepage
version('1.0.0', git='https://git.bioconductor.org/packages/AnnotationFilter', commit='a9f79b26defe3021eea60abe16ce1fa379813ec9')
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-lazyeval', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.0.0')
|
lgpl-2.1
| -4,270,355,889,196,540,000
| 46.166667
| 133
| 0.683998
| false
| 3.962
| false
| false
| false
|
fugitifduck/exabgp
|
lib/exabgp/bgp/message/update/nlri/evpn/nlri.py
|
1
|
2044
|
"""
evpn.py
Created by Thomas Morin on 2014-06-23.
Copyright (c) 2014-2015 Orange. All rights reserved.
"""
from struct import pack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
# ========================================================================= EVPN
# +-----------------------------------+
# | Route Type (1 octet) |
# +-----------------------------------+
# | Length (1 octet) |
# +-----------------------------------+
# | Route Type specific (variable) |
# +-----------------------------------+
class EVPN (object):
registered_evpn = dict()
# NEED to be defined in the subclasses
CODE = -1
NAME = 'unknown'
SHORT_NAME = 'unknown'
# lower case to match the class Address API
afi = AFI(AFI.l2vpn)
safi = SAFI(SAFI.evpn)
def __init__ (self, packed):
self.packed = packed
def _prefix (self):
return "evpn:%s:" % (self.registered_evpn.get(self.CODE,self).SHORT_NAME.lower())
def __str__ (self):
return "evpn:%s:%s" % (self.registered_evpn.get(self.CODE,self).SHORT_NAME.lower(),'0x' + ''.join('%02x' % ord(_) for _ in self.packed))
def __repr__ (self):
return str(self)
def pack (self):
return pack('!BB',self.CODE,len(self.packed)) + self.packed
def __len__ (self):
return len(self.packed) + 2
# For subtype 2 (MAC/IP advertisement route),
# we will have to ignore a part of the route, so this method will be overridden
def __cmp__ (self, other):
if not isinstance(other,EVPN):
return -1
if self.CODE != other.CODE:
return -1
if self.packed != other.packed:
return -1
return 0
def __hash__ (self):
return hash("%s:%s:%s:%s" % (self.afi,self.safi,self.CODE,self.packed))
@staticmethod
def register_evpn (klass):
EVPN.registered_evpn[klass.CODE] = klass
@classmethod
def unpack (cls, data):
code = ord(data[0])
length = ord(data[1])
if code in cls.registered_evpn:
return cls.registered_evpn[code].unpack(data[length+1:])
klass = cls(data[length+1:])
klass.CODE = code
return klass
|
bsd-3-clause
| -7,025,846,381,177,260,000
| 23.926829
| 138
| 0.576321
| false
| 3.073684
| false
| false
| false
|
vherman3/AxonSegmentation
|
AxonDeepSeg/learn_model.py
|
1
|
12287
|
import tensorflow as tf
import math
import numpy as np
import os
import pickle
import time
from learning.input_data import input_data
import sys
def learn_model(trainingset_path, model_path, model_restored_path = None, learning_rate = None, verbose = 1):
if not learning_rate :
learning_rate = 0.0005
# Miscellaneous variables
Loss = []
Epoch = []
Accuracy = []
Report = ''
verbose = 1
# Training or Predicting
restore = True
# Results and Models
folder_model = model_path
if not os.path.exists(folder_model):
os.makedirs(folder_model)
display_step = 100
save_step = 600
# Network Parameters
image_size = 256
n_input = image_size * image_size
n_classes = 2
dropout = 0.75
depth = 6
hyperparameters = {'depth': depth,'dropout': dropout, 'image_size': image_size,
'model_restored_path': model_restored_path, 'restore': restore}
with open(folder_model+'/hyperparameters.pkl', 'wb') as handle :
pickle.dump(hyperparameters, handle)
# Optimization Parameters
batch_size = 1
training_iters = 500000
epoch_size = 200
Report += '\n\n---Savings---'
Report += '\n Model saved in : '+ folder_model
Report += '\n\n---PARAMETERS---\n'
Report += 'learning_rate : '+ str(learning_rate)+'; \n batch_size : ' + str(batch_size) +';\n depth : ' + str(depth) \
+';\n epoch_size: ' + str(epoch_size)+';\n dropout : ' + str(dropout)+';\n restore : ' + str(restore)\
+';\n (if model restored) restored_model :' + str(model_restored_path)
data_train = input_data(trainingset_path=trainingset_path, type='train')
data_test = input_data(trainingset_path=trainingset_path, type='test')
# Graph input
x = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size))
y = tf.placeholder(tf.float32, shape=(batch_size*n_input, n_classes))
keep_prob = tf.placeholder(tf.float32)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
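# Added descriptive note: conv_net below builds a U-Net style encoder/decoder:
# `depth` contraction stages (two 3x3 convolutions then 2x2 max pooling), a
# two-convolution bottleneck, and `depth` expansion stages that upsample
# (resize plus a 2x2 convolution), concatenate the matching contraction
# output kept in relu_results and convolve twice more, finishing with a 1x1
# convolution that produces n_classes output maps.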
def conv_net(x, weights, biases, dropout, image_size = image_size):
# Reshape input picture
x = tf.reshape(x, shape=[-1, image_size, image_size, 1])
data_temp = x
data_temp_size = [image_size]
relu_results = []
# contraction
for i in range(depth):
conv1 = conv2d(data_temp, weights['wc1'][i], biases['bc1'][i])
conv2 = conv2d(conv1, weights['wc2'][i], biases['bc2'][i])
relu_results.append(conv2)
conv2 = maxpool2d(conv2, k=2)
data_temp_size.append(data_temp_size[-1]/2)
data_temp = conv2
conv1 = conv2d(data_temp, weights['wb1'], biases['bb1'])
conv2 = conv2d(conv1, weights['wb2'], biases['bb2'])
data_temp_size.append(data_temp_size[-1])
data_temp = conv2
# expansion
for i in range(depth):
data_temp = tf.image.resize_images(data_temp, data_temp_size[-1] * 2, data_temp_size[-1] * 2)
upconv = conv2d(data_temp, weights['upconv'][i], biases['upconv'][i])
data_temp_size.append(data_temp_size[-1]*2)
upconv_concat = tf.concat(concat_dim=3, values=[tf.slice(relu_results[depth-i-1], [0, 0, 0, 0],
[-1, data_temp_size[depth-i-1], data_temp_size[depth-i-1], -1]), upconv])
conv1 = conv2d(upconv_concat, weights['we1'][i], biases['be1'][i])
conv2 = conv2d(conv1, weights['we2'][i], biases['be2'][i])
data_temp = conv2
finalconv = tf.nn.conv2d(conv2, weights['finalconv'], strides=[1, 1, 1, 1], padding='SAME')
final_result = tf.reshape(finalconv, tf.TensorShape([finalconv.get_shape().as_list()[0] * data_temp_size[-1] * data_temp_size[-1], 2]))
return final_result
weights = {'wc1':[],'wc2':[],'we1':[],'we2':[],'upconv':[],'finalconv':[],'wb1':[], 'wb2':[]}
biases = {'bc1':[],'bc2':[],'be1':[],'be2':[],'finalconv_b':[],'bb1':[], 'bb2':[],'upconv':[]}
# Contraction
for i in range(depth):
if i == 0:
num_features_init = 1
num_features = 64
else:
num_features = num_features_init * 2
# Store layers weight & bias
weights['wc1'].append(tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))), name = 'wc1-%s'%i))
weights['wc2'].append(tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name = 'wc2-%s'%i))
biases['bc1'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='bc1-%s'%i))
biases['bc2'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='bc2-%s'%i))
image_size = image_size/2
num_features_init = num_features
num_features = num_features_init*2
weights['wb1']= tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))),name='wb1-%s'%i)
weights['wb2']= tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='wb2-%s'%i)
biases['bb1']= tf.Variable(tf.random_normal([num_features]), name='bb1-%s'%i)
biases['bb2']= tf.Variable(tf.random_normal([num_features]), name='bb2-%s'%i)
num_features_init = num_features
for i in range(depth):
num_features = num_features_init/2
weights['upconv'].append(tf.Variable(tf.random_normal([2, 2, num_features_init, num_features]), name='upconv-%s'%i))
biases['upconv'].append(tf.Variable(tf.random_normal([num_features]), name='bupconv-%s'%i))
weights['we1'].append(tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))), name='we1-%s'%i))
weights['we2'].append(tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='we2-%s'%i))
biases['be1'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='be1-%s'%i))
biases['be2'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='be2-%s'%i))
num_features_init = num_features
weights['finalconv']= tf.Variable(tf.random_normal([1, 1, num_features, n_classes]), name='finalconv-%s'%i)
biases['finalconv_b']= tf.Variable(tf.random_normal([n_classes]), name='bfinalconv-%s'%i)
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
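# Pixel-wise softmax cross-entropy, averaged over every pixel in the batch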
tf.scalar_summary('Loss', cost)
index = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
mask = tf.argmax(pred, 1)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.initialize_all_variables()
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
# Launch the graph
Report += '\n\n---Intermediary results---\n'
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
last_epoch = 0
if model_restored_path :
folder_restored_model = model_restored_path
saver.restore(sess, folder_restored_model+"/model.ckpt")
file = open(folder_restored_model+'/evolution.pkl','r')
evolution_restored = pickle.load(file)
last_epoch = evolution_restored["steps"][-1]
else:
sess.run(init)
print 'training start'
step = 1
epoch = 1 + last_epoch
while step * batch_size < training_iters:
batch_x, batch_y = data_train.next_batch(batch_size, rnd = True, augmented_data= True)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc, p = sess.run([cost, accuracy, pred], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1., index: step*batch_size})
prediction = data_train.read_batch(p, batch_size)[0, :, :, 0]
ground_truth = data_train.read_batch(batch_y, batch_size)[0, :, :, 0]
if verbose == 2:
outputs = "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc)
print outputs
if step % epoch_size == 0 :
start = time.time()
A = []
L = []
print epoch
data_test.set_batch_start()
print data_test.batch_start
for i in range(data_test.set_size):
batch_x, batch_y = data_test.next_batch(batch_size, rnd=False, augmented_data= False)
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
A.append(acc)
L.append(loss)
if verbose >= 1:
print '--\nAccuracy on patch'+str(i)+': '+str(acc)
print 'Loss on patch'+str(i)+': '+str(loss)
Accuracy.append(np.mean(A))
Loss.append(np.mean(L))
Epoch.append(epoch)
output_2 = '\n----\n Epoch: ' + str(epoch)
output_2+= '\n Accuracy: ' + str(np.mean(A))+';'
output_2+= '\n Loss: ' + str(np.mean(L))+';'
print '\n\n----Scores on test:---' + output_2
Report+= output_2
epoch+=1
if step % save_step == 0:
evolution = {'loss': Loss, 'steps': Epoch, 'accuracy': Accuracy}
with open(folder_model+'/evolution.pkl', 'wb') as handle:
pickle.dump(evolution, handle)
save_path = saver.save(sess, folder_model+"/model.ckpt")
print("Model saved in file: %s" % save_path)
file = open(folder_model+"/report.txt", 'w')
file.write(Report)
file.close()
step += 1
save_path = saver.save(sess, folder_model+"/model.ckpt")
evolution = {'loss': Loss, 'steps': Epoch, 'accuracy': Accuracy}
with open(folder_model+'/evolution.pkl', 'wb') as handle :
pickle.dump(evolution, handle)
print("Model saved in file: %s" % save_path)
print "Optimization Finished!"
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path_training", required=True, help="")
ap.add_argument("-m", "--path_model", required=True, help="")
ap.add_argument("-m_init", "--path_model_init", required=False, help="")
ap.add_argument("-lr", "--learning_rate", required=False, help="")
args = vars(ap.parse_args())
path_training = args["path_training"]
path_model = args["path_model"]
path_model_init = args["path_model_init"]
learning_rate = args["learning_rate"]
if learning_rate :
learning_rate = float(args["learning_rate"])
else : learning_rate = None
learn_model(path_training, path_model, path_model_init, learning_rate)
|
mit
| 6,847,251,026,175,072,000
| 39.96
| 172
| 0.567348
| false
| 3.373696
| true
| false
| false
|
openconfig/oc-pyang
|
openconfig_pyang/plugins/util/html_helper_test.py
|
1
|
2086
|
"""
Copyright 2015 Google, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Test data for html_helper
"""
import html_helper
def main():
a_list = ['red', 'green', 'blue', 'orange']
text = 'a line of text'
attrs = {"class":"my-css-class", "id":"element-id"}
tag = "span"
label = "label:"
paragraph = "Lorem ipsum dolor sit amet, consectetur adipiscing\
elit. Nunc maximus, dui non sollicitudin sollicitudin, leo nibh\
luctus orci, varius maximus lacus nulla eget nibh. Nulla faucibus\
purus nulla, eu molestie massa cursus vitae. Vestibulum metus purus,\
tempus sed risus ac, lobortis efficitur lorem."
ht = html_helper.HTMLHelper()
print ht.h1(text)
print ht.h1(text, attrs)
print "\n"
print ht.h2(text)
print ht.h2(text, attrs)
print "\n"
print ht.h3(text)
print ht.h3(text, attrs)
print "\n"
print ht.h4(text)
print ht.h4(text, attrs)
print "\n"
print ht.h5(text)
print ht.h5(text, attrs)
print "\n"
print ht.h6(text)
print ht.h6(text, attrs)
print ht.h1(text, attrs, 5, True)
print ht.h1(text, attrs, 2, False)
print ht.h(8,text,attrs)
print ht.h(-1,text,attrs)
print ht.hr()
print ht.add_tag (tag, text, attrs)
print ht.add_tag (tag, text)
print "\n"
print ht.para(paragraph, attrs)
print "\n"
print ht.para(ht.add_tag(tag,label) + paragraph)
print "\n"
print ht.open_tag("div")
print ht.para(paragraph)
print ht.close_tag()
# print md.ol(a_list)
# print md.ul(a_list)
# print md.hr()
# print md.i(text)
# print md.b(text)
# print md.code(text)
if __name__ == '__main__':
main()
|
apache-2.0
| 2,804,690,863,647,126,000
| 21.923077
| 72
| 0.678811
| false
| 2.889197
| false
| false
| false
|
blackjade/aci2xml
|
aci2xml.py
|
1
|
4226
|
#
# Copyright (c) 2015 Fluke Networks.
# All rights reserved.
# No part of this source code may be copied, used, or modified
# without the express written consent of Fluke Networks.
#
# aci2xml.py: Convert the policy manager related section in a *.aci
# file to xml. For example, these lines:
# [\PolicyManager\Alarm0]
# Enable=D_1
# Count_Of_Threshold=D_1
# [\PolicyManager\Alarm0\Threshold0]
# Severity=D_2
# SeverityScore=D_100
# Action=S_Beep
# GroupType=D_2
# SSIDGroupCount=D_1
# SSIDGroup=S_MyWLAN
# ACLGroupCount=D_2
# ACLGroups=S_0,1
# Will be converted to this:
# <Alarm0>
# <AlarmEnabled>1</AlarmEnabled>
# <ThresholdCount>1</ThresholdCount>
# <Threshold0>
# <Severity>2</Severity>
# <Action>Beep</Action>
# <ThresholdInt>0</ThresholdInt>
# <ThresholdString/>
# <GroupType>2</GroupType>
# <IntArray_Count>0</IntArray_Count>
# <IntArray/>
# <FrameCount>50</FrameCount>
# <SignalStrength>15</SignalStrength>
# <IntMap_Count>0</IntMap_Count>
# <IntMap/>
# <SSIDGroups_Count>1</SSIDGroups_Count>
# <SSIDGroups>MyWLAN</SSIDGroups>
# <ACLGroups_Count>1</ACLGroups_Count>
# <ACLGroups>0</ACLGroups>
# </Threshold0>
# </Alarm0>
import os, argparse
import json
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import tostring
import xml.dom.minidom as minidom
def dictToXmlElement(tag, xmlDict):
'''
Convert a dict to xml element
'''
if not xmlDict or not isinstance(xmlDict, dict):
return None
elem = Element(tag)
for key, val in xmlDict.items():
if isinstance(val, dict):
# The next level is also a dict. recursive call to convert any depth
child = dictToXmlElement(key, val)
else:
child = Element(key)
child.text = str(val)
elem.append(child)
return elem
def readAci(fileName):
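# Parse the .aci file into nested dicts: "[\Section\SubSection]" headers open
# nested levels, and "key=VALUE" lines store the value with the '=' and the
# two-character type prefix (e.g. "D_" or "S_") stripped off.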
xmlRoot = dict()
with open(fileName) as f:
currNode = None
for s in f:
s = s.strip()
#print s
if s.startswith('[\\') and s.endswith(']'):
s = s[1:-1].strip()
if s == "":
currNode = None
continue
xmlKeys = s.split('\\')
currNode = xmlRoot
for key in xmlKeys:
if key == "":
continue
if not key in currNode:
currNode[key] = dict()
currNode = currNode[key]
elif '=' in s:
if currNode is None:
print s
else:
pos = s.find('=')
key = s[0:pos]
value = s[pos+3:]
currNode[key] = value
return xmlRoot
def writePolicyManagerXml(xmlDict, fileName):
'''
COnvert a simple dict from reading aci file to xml tree
'''
if 'PolicyManager' in xmlDict:
xmlElem = dictToXmlElement('PolicyManager', xmlDict['PolicyManager'])
xmlString = tostring(xmlElem)
reparsed = minidom.parseString(xmlString)
with open(fileName, 'wb') as f:
reparsed.writexml(f, indent="\t", addindent="\t", newl="\n")
print 'Policy written to:', fileName
def main():
#parser = argparse.ArgumentParser(description='Convert the policy manager related section in a .aci file to xml file.')
#parser.add_argument('aciFile', type=str, help='ACI file name', nargs='?', default='./config/Default.aci')
#parser.add_argument('xmlFile', type=str, help='XML file name', nargs='?', default='./config/Default.xml')
#args = parser.parse_args()
aciFile = './config/Default.aci'
xmlFile = './config/Default.xml'
print 'Converting', aciFile, '->', xmlFile
xmlDict = readAci(aciFile)
if not xmlDict:
print 'Cannot open the aci file or it is empty:', aciFile
writePolicyManagerXml(xmlDict, xmlFile)
print 'Done!'
if __name__ == '__main__':
main()
|
apache-2.0
| 7,077,029,358,062,470,000
| 30.015152
| 123
| 0.559158
| false
| 3.710272
| false
| false
| false
|
freundTech/deepl-cli
|
deepl/__main__.py
|
1
|
2176
|
import argparse
import locale
import sys
from deepl import translator
def print_results(result, extra_data, verbose=False):
if verbose:
print("Translated from {} to {}".format(extra_data["source"], extra_data["target"]))
print(result)
def main():
parser = argparse.ArgumentParser(description="Translate text to other languages using deepl.com")
parser.add_argument("-s", "--source", help="Source language", metavar="lang")
parser.add_argument("-t", "--target", help="Target language", metavar="lang")
parser.add_argument("-i", "--interactive", help="Force interactive mode", action="store_true")
parser.add_argument("-v", "--verbose", help="Print additional information", action="store_true")
parser.add_argument("text", nargs='*')
args = parser.parse_args()
locale_ = locale.getdefaultlocale()
preferred_langs = [locale_[0].split("_")[0].upper()]
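# Seed the preferred-language list with the system locale's language code, e.g. "en_US" -> "EN"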
if not args.source is None:
source = args.source.upper()
else:
source = 'auto'
if not args.target is None:
target = args.target.upper()
else:
target = None
if len(args.text) == 0 or args.interactive:
if sys.stdin.isatty() or args.interactive:
print("Please input text to translate")
while True:
text = input("> ")
result, extra_data = translator.translate(text, source, target, preferred_langs)
print_results(result, extra_data, args.verbose)
if extra_data["source"] not in preferred_langs:
preferred_langs.append(extra_data["source"])
if extra_data["target"] not in preferred_langs:
preferred_langs.append(extra_data["target"])
else:
text = sys.stdin.read()
result, extra_data = translator.translate(text, source, target, preferred_langs)
print_results(result, extra_data, args.verbose)
else:
text = " ".join(args.text)
result, extra_data = translator.translate(text, source, target, preferred_langs)
print_results(result, extra_data, args.verbose)
if __name__ == "__main__":
main()
|
mit
| -357,198,274,125,609,200
| 35.283333
| 101
| 0.617188
| false
| 4.129032
| false
| false
| false
|
AvengerMoJo/DeepSea
|
srv/modules/runners/upgrade.py
|
1
|
3678
|
# -*- coding: utf-8 -*-
# pylint: disable=modernize-parse-error
"""
Verify that an automated upgrade is possible
"""
from __future__ import absolute_import
from __future__ import print_function
# pylint: disable=import-error,3rd-party-module-not-gated,redefined-builtin
import salt.client
import salt.utils.error
class UpgradeValidation(object):
"""
Due to the current situation you have to upgrade
all monitors before ceph allows you to start any OSD
Our current implementation of maintenance upgrades
triggers this behavior if you happen to have
Monitors and Storage roles assigned on the same node
(And more then one monitor)
To avoid this, before actually providing a proper solution,
we stop users to execute the upgade in the first place.
"""
def __init__(self, cluster='ceph'):
"""
Initialize Salt client, cluster
"""
self.local = salt.client.LocalClient()
self.cluster = cluster
def colocated_services(self):
"""
Check for shared monitor and storage roles
"""
search = "I@cluster:{}".format(self.cluster)
pillar_data = self.local.cmd(
search, 'pillar.items', [], tgt_type="compound")
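# pillar.items returns a dict keyed by minion id; each value is that minion's
# pillar data, including its assigned 'roles' list.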
for host in pillar_data:
if 'roles' in pillar_data[host]:
if ('storage' in pillar_data[host]['roles']
and 'mon' in pillar_data[host]['roles']):
msg = """
************** PLEASE READ ***************
We currently do not support upgrading when
you have a monitor and a storage role
assigned on the same node.
******************************************"""
return False, msg
return True, ""
def is_master_standalone(self):
"""
Check for shared master and storage role
"""
search = "I@roles:master"
pillar_data = self.local.cmd(
search, 'pillar.items', [], tgt_type="compound")
# in case of multimaster
for host in pillar_data:
if 'roles' in pillar_data[host]:
if 'storage' in pillar_data[host]['roles']:
msg = """
************** PLEASE READ ***************
Detected a storage role on your master.
This is not supported. Please migrate all
OSDs off the master in order to continue.
******************************************"""
return False, msg
return True, ""
@staticmethod
def is_supported():
"""
Check if the automated upgrade is supported
"""
msg = """
************** PLEASE READ ***************
The automated upgrade is currently not supported.
Please refer to the official documentation.
******************************************"""
return False, msg
def help_():
"""
Usage
"""
usage = (
'salt-run upgrade.check:\n\n'
' Performs a series of checks to verify that upgrades are possible\n'
'\n\n')
print(usage)
return ""
def check():
"""
Run upgrade checks
"""
uvo = UpgradeValidation()
checks = [uvo.is_master_standalone,
uvo.is_supported] # , uvo.colocated_services]
for chk in checks:
ret, msg = chk()
if not ret:
print(msg)
return ret
return ret
__func_alias__ = {
'help_': 'help',
}
|
gpl-3.0
| -6,941,238,278,801,299,000
| 31.263158
| 80
| 0.507069
| false
| 4.603254
| false
| false
| false
|
clones/wtforms
|
wtforms/ext/appengine/fields.py
|
1
|
3025
|
import decimal
from wtforms import fields, widgets
class ReferencePropertyField(fields.SelectFieldBase):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
"""
widget = widgets.Select()
def __init__(self, label=u'', validators=None, reference_class=None,
label_attr=None, allow_blank=False, blank_text=u'', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
self.label_attr = label_attr
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is None:
raise TypeError('Missing reference_class attribute in '
'ReferencePropertyField')
self.query = reference_class.all()
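# self._formdata holds the raw submitted key until .data is read, at which
# point it is resolved lazily against the reference query (see _get_data).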
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
key = str(obj.key())
if key == self._formdata:
self._set_data(key)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key())
label = self.label_attr and getattr(obj, self.label_attr) or key
yield (key, label, key == self.data)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.query:
if self.data == str(obj.key()):
break
else:
raise ValueError(self.gettext(u'Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def process_data(self, value):
if isinstance(value, list):
value = '\n'.join(value)
self.data = value
def populate_obj(self, obj, name):
if isinstance(self.data, basestring):
value = self.data.splitlines()
else:
value = []
setattr(obj, name, value)
class GeoPtPropertyField(fields.TextField):
def process_formdata(self, valuelist):
if valuelist:
try:
lat, lon = valuelist[0].split(',')
self.data = u'%s,%s' % (decimal.Decimal(lat.strip()), decimal.Decimal(lon.strip()),)
except (decimal.InvalidOperation, ValueError):
raise ValueError(u'Not a valid coordinate location')
|
bsd-3-clause
| -4,359,445,590,405,354,500
| 30.842105
| 100
| 0.549752
| false
| 4.278642
| false
| false
| false
|
pyrocko/pyrocko
|
src/apps/colosseo.py
|
1
|
7525
|
from __future__ import print_function
# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
import sys
import logging
import os.path as op
from optparse import OptionParser
from pyrocko import util, scenario, guts, gf
from pyrocko import __version__
logger = logging.getLogger('pyrocko.apps.colosseo')
km = 1000.
def d2u(d):
return dict((k.replace('-', '_'), v) for (k, v) in d.items())
description = '''This is Colosseo, an earthquake scenario generator.
Create seismic waveforms, InSAR and GNSS offsets for a simulated earthquake
scenario.
Colosseo is part of Pyrocko. Version %s.
''' % __version__
subcommand_descriptions = {
'init': 'initialize a new, blank scenario',
'fill': 'fill the scenario with modelled data',
'snuffle': 'open Snuffler to inspect the waveform data',
'map': 'map the scenario arena'
}
subcommand_usages = {
'init': 'init <scenario_dir>',
'fill': 'fill <scenario_dir>',
'snuffle': 'snuffle <scenario_dir>',
'map': 'map <scenario_dir>',
}
subcommands = subcommand_descriptions.keys()
program_name = 'colosseo'
usage_tdata = d2u(subcommand_descriptions)
usage_tdata['program_name'] = program_name
usage_tdata['description'] = description
usage = '''%(program_name)s <subcommand> [options] [--] <arguments> ...
%(description)s
Subcommands:
init %(init)s
fill %(fill)s
snuffle %(snuffle)s
map %(map)s
To get further help and a list of available options for any subcommand run:
%(program_name)s <subcommand> --help
''' % usage_tdata
def die(message, err='', prelude=''):
if prelude:
prelude = prelude + '\n'
if err:
err = '\n' + err
sys.exit('%s%s failed: %s%s' % (prelude, program_name, message, err))
def none_or_float(x):
if x == 'none':
return None
else:
return float(x)
def add_common_options(parser):
parser.add_option(
'--loglevel',
action='store',
dest='loglevel',
type='choice',
choices=('critical', 'error', 'warning', 'info', 'debug'),
default='info',
help='set logger level to '
'"critical", "error", "warning", "info", or "debug". '
'Default is "%default".')
def process_common_options(options):
util.setup_logging(program_name, options.loglevel)
def cl_parse(command, args, setup=None, details=None):
usage = subcommand_usages[command]
descr = subcommand_descriptions[command]
if isinstance(usage, str):
usage = [usage]
susage = '%s %s' % (program_name, usage[0])
for s in usage[1:]:
susage += '\n%s%s %s' % (' '*7, program_name, s)
description = descr[0].upper() + descr[1:] + '.'
if details:
description = description + ' %s' % details
parser = OptionParser(usage=susage, description=description)
if setup:
setup(parser)
add_common_options(parser)
(options, args) = parser.parse_args(args)
process_common_options(options)
return parser, options, args
def get_scenario_yml(path):
fn = op.join(path, 'scenario.yml')
if op.exists(fn):
return fn
return False
def command_init(args):
def setup(parser):
parser.add_option(
'--force', dest='force', action='store_true',
help='overwrite existing files')
parser.add_option(
'--location', dest='location', metavar='LAT,LON',
help='set scenario center location [deg]')
parser.add_option(
'--radius', dest='radius', metavar='RADIUS', type=float,
help='set scenario radius [km]')
parser, options, args = cl_parse('init', args, setup=setup)
if len(args) != 1:
parser.print_help()
sys.exit(1)
if options.location:
try:
lat, lon = map(float, options.location.split(','))
except Exception:
die('expected --location=LAT,LON')
else:
lat = lon = None
if options.radius is not None:
radius = options.radius * km
else:
radius = None
project_dir = args[0]
try:
scenario.ScenarioGenerator.initialize(
project_dir, lat, lon, radius, force=options.force)
gf_stores_path = op.join(project_dir, 'gf_stores')
util.ensuredir(gf_stores_path)
except scenario.CannotCreatePath as e:
die(str(e) + ' Use --force to override.')
except scenario.ScenarioError as e:
die(str(e))
def command_fill(args):
def setup(parser):
parser.add_option(
'--force', dest='force', action='store_true',
help='overwrite existing files')
parser, options, args = cl_parse('fill', args, setup=setup)
if len(args) == 0:
args.append('.')
fn = get_scenario_yml(args[0])
if not fn:
parser.print_help()
sys.exit(1)
project_dir = args[0]
gf_stores_path = op.join(project_dir, 'gf_stores')
try:
engine = get_engine([gf_stores_path])
sc = guts.load(filename=fn)
sc.init_modelling(engine)
sc.ensure_gfstores(interactive=True)
sc.prepare_data(path=project_dir, overwrite=options.force)
sc.ensure_data(path=project_dir)
sc.make_map(op.join(project_dir, 'map.pdf'))
except scenario.CannotCreatePath as e:
die(str(e) + ' Use --force to override.')
except scenario.ScenarioError as e:
die(str(e))
def command_map(args):
parser, options, args = cl_parse('map', args)
if len(args) == 0:
args.append('.')
fn = get_scenario_yml(args[0])
if not fn:
parser.print_help()
sys.exit(1)
project_dir = args[0]
gf_stores_path = op.join(project_dir, 'gf_stores')
engine = get_engine([gf_stores_path])
try:
sc = guts.load(filename=fn)
sc.init_modelling(engine)
sc.make_map(op.join(project_dir, 'map.pdf'))
except scenario.ScenarioError as e:
die(str(e))
def command_snuffle(args):
from pyrocko.gui import snuffler
parser, options, args = cl_parse('snuffle', args)
if len(args) == 0:
args.append('.')
fn = get_scenario_yml(args[0])
if not fn:
parser.print_help()
sys.exit(1)
project_dir = args[0]
gf_stores_path = op.join(project_dir, 'gf_stores')
engine = get_engine([gf_stores_path])
sc = guts.load(filename=fn)
sc.init_modelling(engine)
return snuffler.snuffle(
sc.get_pile(),
stations=sc.get_stations(),
events=sc.get_events())
def main(args=None):
if args is None:
args = sys.argv[1:]
if len(args) < 1:
sys.exit('Usage: %s' % usage)
command = args.pop(0)
if command in subcommands:
globals()['command_' + command](args)
elif command in ('--help', '-h', 'help'):
if command == 'help' and args:
acommand = args[0]
if acommand in subcommands:
globals()['command_' + acommand](['--help'])
sys.exit('Usage: %s' % usage)
else:
sys.exit('%s: error: no such subcommand: %s' % (program_name, command))
def get_engine(gf_store_superdirs):
engine = gf.LocalEngine(
store_superdirs=gf_store_superdirs, use_config=True)
logger.info(
'Directories to be searched for GF stores:\n%s'
% '\n'.join(' ' + s for s in engine.store_superdirs))
return engine
if __name__ == '__main__':
main()
|
gpl-3.0
| 5,485,994,843,530,995,000
| 22.964968
| 79
| 0.592292
| false
| 3.429809
| false
| false
| false
|
Darktel/Homework
|
Translit.py
|
1
|
1089
|
__author__ = 'Darktel'
def translit(Mystring):
"""
String (Rus) -> String (Eng)
"""
RusString = 'а,б,в,г,д,е,ё,ж,з,и,й,к,л,м,н,о,п,р,с,т,у,ф,х,ц,ч,ш,щ,ы,ь,ъ,э,ю,я,*, ,'
EngString = "a,b,v,g,d,e,yo,zh,z,i,j,k,l,m,n,o,p,r,s,t,u,f,h,c,ch,sh,xh,y,',`,q,ju,ya,*,-,"
RusChar = RusString.split(',')
EngChar = EngString.split(',')
translitString = ''
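# Map each character through the Cyrillic->Latin tables, preserving case;
# characters without a mapping are copied through unchanged.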
for char in Mystring:
try:
if char.isupper():
charlow = char.lower()
index = RusChar.index(charlow)
translitString += EngChar[index].upper()
else:
index = RusChar.index(char)
translitString += EngChar[index]
except ValueError:
translitString += char
return translitString
testString = translit('''доска обрезная 50х150х6000
доска обрезная краснодар цена
цена куба доски обрезной
куб обрезной доски
купить доску обрезную
''')
print(testString)
|
gpl-2.0
| 5,363,980,151,163,219,000
| 22.390244
| 95
| 0.555787
| false
| 2.27791
| false
| false
| false
|
hornedbull/gmailPy
|
gmailPy.py
|
1
|
13792
|
# Vatsal Shah
# ECE-C433 Mini-Project 2
# gmailPy - A terminal gmail client
# Tested on Python 2.7.3
# imapclient is not a part of the standard python library
# install using sudo pip install imapclient
import getpass
from imapclient import IMAPClient
import operator
import email
import optparse
import sys
class gmailPy(object):
def __init__(self):
self.IMAP_SERVER = 'imap.gmail.com'
self.ssl = True
self.myIMAPc = None
self.response = None
self.folders = []
def login(self, username, password):
self.myIMAPc = IMAPClient(self.IMAP_SERVER, ssl=self.ssl)
self.myIMAPc.login(username, password)
# Returns a list of all the folders for a particular account
def get_folders(self):
self.response = self.myIMAPc.list_folders()
for item in self.response:
self.folders.append(item[2].strip('u'))
return self.folders
# Returns the total number of messages in a folder
def get_mail_count(self, folder='Inbox'):
self.response = self.myIMAPc.select_folder(folder, True)
return self.response['EXISTS']
# Method to delete messages based on their size
def delete_bigmail(self, folder='Inbox'):
self.myIMAPc.select_folder(folder, False)
# Gets all the message ids of the messages which are not deleted in the folder
messages = self.myIMAPc.search(['NOT DELETED'])
print "%d messages that aren't deleted" % len(messages)
if len(messages) > 0:
print "You can exit by entering 0 or pressing CTRL+C \n"
else: print "There are no messages in the folder"
# Gets the message sizes for all the message ids returned in previous step
# Note: Just sends one request for all message ids with a return time < 10 ms
self.response = self.myIMAPc.fetch(messages, ['RFC822.SIZE'])
# Sorts the dictionary returned by fetch by size in descending order
sorted_response = sorted(self.response.iteritems(), key=operator.itemgetter(1), reverse=True)
count = 1
try:
for item in sorted_response:
# Gets the biggest message including headers, body, etc.
big_message = self.myIMAPc.fetch(item[0], ['RFC822'])
for msgid, data in big_message.iteritems():
msg_string = data['RFC822']
# Parses the message string using email library
msg = email.message_from_string(msg_string)
val = dict(self.response[msgid])['RFC822.SIZE']
print 'ID %d: From: %s Date: %s' % (msgid, msg['From'], msg['date'])
print 'To: %s' % (msg['To'])
print 'Subject: %s' % (msg['Subject'])
print 'Size: %d bytes \n' % (val)
user_del = raw_input("Do you want to delete this message?(Y/N): ")
if user_del == 'Y':
self.delete_message(msgid)
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
elif user_del == '0':
print "Program exiting"
sys.exit()
else:
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
count += 1
except KeyboardInterrupt:
print "Program exiting"
sys.exit()
# Method to delete messages based on their size with a search criteria
def delete_bigmail_search(self, folder='Inbox', command='', criteria=''):
self.myIMAPc.select_folder(folder, False)
# Gets all the message ids from the server based on the search criteria
messages = self.myIMAPc.search('%s "%s"' % (command, criteria))
print "%d messages that match --> %s: %s" % (len(messages), command, criteria)
if len(messages) > 0:
print "You can exit by entering 0 or pressing CTRL+C \n"
else: print "There are no messages in that matched your search criteria"
# Gets the message sizes for all the message ids returned in previous step
# Note: Just sends one request for all message ids with a return time < 10 ms
self.response = self.myIMAPc.fetch(messages, ['RFC822.SIZE'])
# Sorts the messages in decending order of their sizes
sorted_response = sorted(self.response.iteritems(), key=operator.itemgetter(1), reverse=True)
count = 1
try:
for item in sorted_response:
# Gets the entire content for the biggest message identified
big_message = self.myIMAPc.fetch(item[0], ['RFC822'])
for msgid, data in big_message.iteritems():
msg_string = data['RFC822']
msg = email.message_from_string(msg_string)
val = dict(self.response[msgid])['RFC822.SIZE']
print 'ID %d: From: %s Date: %s' % (msgid, msg['From'], msg['date'])
print 'To: %s' % (msg['To'])
print 'Subject: %s' % (msg['Subject'])
print 'Size: %d bytes \n' % (val)
user_del = raw_input("Do you want to delete this message?(Y/N): ")
if user_del == 'Y':
self.delete_message(msgid)
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
elif user_del == '0':
print "Program exiting"
sys.exit()
else:
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
count += 1
except KeyboardInterrupt:
print "Program exiting"
sys.exit()
# Deletes a message in the current folder based on msg id
def delete_message(self, id):
try:
self.myIMAPc.delete_messages([id])
self.myIMAPc.expunge()
print "Message deleted"
except IMAPClient.Error as err:
print "Message deletion failed"
print err
# Renames a folder
def rename_folder(self, oldfolder, newfolder):
try:
self.myIMAPc.rename_folder(oldfolder, newfolder)
print "Folder %s renamed to %s" % (oldfolder, newfolder)
except IMAPClient.Error as err:
print "Folder renaming failed"
print err
# Creates a new folder
def create_folder(self, folder):
try:
self.myIMAPc.create_folder(folder)
print "New folder %s created" % folder
except IMAPClient.Error as err:
print "Folder creation failed"
print err
# Deletes a folder
def delete_folder(self, folder):
try:
self.myIMAPc.delete_folder(folder)
print "Folder %s deleted" % folder
except IMAPClient.Error as err:
print "Folder deletion failed"
print err
# Creates a new folder and copies the content from the two folders that need to be merged
# Then deletes the old folders
def merge_folders(self, merged_folder, folder_1, folder_2):
try:
self.create_folder(merged_folder)
# Select the source folder read-only; messages are only copied out of it
self.myIMAPc.select_folder(folder_1, True)
messages = self.myIMAPc.search(['NOT DELETED'])
print "Moving %d messages from %s to %s" % (len(messages), folder_1, merged_folder)
self.myIMAPc.copy(messages, merged_folder)
self.myIMAPc.select_folder(folder_2, True)
messages = self.myIMAPc.search(['NOT DELETED'])
print "Moving %d messages from %s to %s" % (len(messages), folder_2, merged_folder)
self.myIMAPc.copy(messages, merged_folder)
print "Deleting %s and %s..." % (folder_1, folder_2)
self.delete_folder(folder_1)
self.delete_folder(folder_2)
print "Merge folder operation succeeded"
except IMAPClient.Error as err:
print "Merge operation failed"
print err
def logout(self):
self.myIMAPc.logout()
def main():
# Using parser library for handling command line arguments
usage = "usage: python gmailPy.py [options]"
prog_desc = """gmailPy is a scalable command line gmail client capable of adding, deleting, renaming and merging folders. It also provides interface for the user to delete big messages based on size and search criteria."""
parser = optparse.OptionParser(usage=usage, description=prog_desc)
parser.add_option(
'-l', '--list', help="List folder statistics. This doesn't need any arguments. Usage: python gmailPy.py -l", dest='lf',
default=False, action='store_true')
parser.add_option(
'-b', '--big', help='Delete big messages. Please enter folder name as an argument. For example: python gmailPy.py -b INBOX',
dest='big_folder_name', action='store')
parser.add_option(
'-s', '--bigsearch', help='Delete big messages based on search criteria. This takes 3 arguments folder_name, command and criteria. For example: python gmailPy.py -s INBOX FROM xyz@gmail.com',
dest='bigsearch_folder_name', action='store', nargs=3)
parser.add_option(
'-n', '--new', help='Create new folder. Please enter folder name as an argument. For example: python gmailPy.py -n Test_folder',
dest='new_folder_name', action='store')
parser.add_option(
'-d', '--del', help='Delete a folder. Please enter folder name as an argument. For example: python gmailPy.py -d Test_folder',
dest='del_folder_name', action='store')
parser.add_option(
'-r', '--rename', help='Rename a folder. Please enter old_folder_name and new_folder_name as two arguments. For example: python gmailPy.py -r OLDFOLDERNAME NEWFOLDERNAME',
dest='rename_folder_name', action='store', nargs=2)
parser.add_option(
'-m', '--merge', help='Merge two folders. This takes 3 arguments merged_folder_name , folder_1_name , folder_2_name. For example: python gmailPy.py -m Test_folder_2 Test_folder_0 Test_folder_1',
dest='merge_folder_name', action='store', nargs=3)
(opts, args) = parser.parse_args()
try:
print "***** Welcome to gmailPy!!! A command line GMAIL Client *****"
print "Please enter your username and password >>>>>>"
username = raw_input("Username: ")
password = getpass.getpass()
## Can be set for testing and debugging
# username = 'username'
# password = 'password'
client_session = gmailPy()
client_session.login(username, password)
if opts.lf:
client_folders = client_session.get_folders()
print "########## Your folder Statistics ##########"
for item in client_folders:
try:
print item, ':', client_session.get_mail_count(item), 'messages'
except:
pass
print "############################################"
if opts.big_folder_name != None:
print "Let's enter your %s folder and delete big mail" % opts.big_folder_name
client_session.delete_bigmail(opts.big_folder_name)
available_commands = ['TO', 'FROM', 'SUBJECT']
if opts.bigsearch_folder_name != None:
if opts.bigsearch_folder_name[1] in available_commands:
print "Let's enter your %s folder and delete big mail with %s: %s" % (opts.bigsearch_folder_name[0], opts.bigsearch_folder_name[1], opts.bigsearch_folder_name[2])
client_session.delete_bigmail_search(
opts.bigsearch_folder_name[0], opts.bigsearch_folder_name[1], opts.bigsearch_folder_name[2])
else:
print "Invalid Command Entry. Please enter one of the follwing commands: ", available_commands
if opts.new_folder_name != None:
print "Creating a new folder with name %s ..." % opts.new_folder_name
client_session.create_folder(opts.new_folder_name)
if opts.del_folder_name != None:
print "Deleting %s folder..." % opts.del_folder_name
client_session.delete_folder(opts.del_folder_name)
if opts.rename_folder_name != None:
print "Renaming folder %s to %s..." % (opts.rename_folder_name[0], opts.rename_folder_name[1])
client_session.rename_folder(opts.rename_folder_name[0], opts.rename_folder_name[1])
if opts.merge_folder_name != None:
print "Merging folders %s and %s to %s..." % (opts.merge_folder_name[1], opts.merge_folder_name[2], opts.merge_folder_name[0])
client_session.merge_folders(opts.merge_folder_name[0], opts.merge_folder_name[1], opts.merge_folder_name[2])
client_session.logout()
except IMAPClient.Error as err:
print "Something awful happened"
print err
except KeyboardInterrupt:
print "gmailPy force shutdown"
client_session.logout()
if __name__ == '__main__':
main()
|
gpl-2.0
| -6,506,492,856,560,792,000
| 47.055749
| 226
| 0.583019
| false
| 4.14797
| true
| false
| false
|
xlorepdarkhelm/colors
|
colors/html.py
|
1
|
1085
|
"""Contains the implementation of the HTML color group."""
__all__ = (
'HTML',
)
import enum
from colors import base
class HTML(base.ColorGroup):
"""
The color group for HTML 4.01 approved colors.
These are the colors as defined in the HTML 4.01 specification from 1999.
See Also:
`Wikipedia <https://en.wikipedia.org/wiki/Web_colors#HTML_color_names>`
"""
White = base.RGBColor(255, 255, 255)
Silver = base.RGBColor(192, 192, 192)
Gray = base.RGBColor(128, 128, 128)
Black = base.RGBColor( 0, 0, 0)
Red = base.RGBColor(255, 0, 0)
Maroon = base.RGBColor(128, 0, 0)
Yellow = base.RGBColor(255, 255, 0)
Olive = base.RGBColor(128, 128, 0)
Lime = base.RGBColor( 0, 255, 0)
Green = base.RGBColor( 0, 128, 0)
Aqua = base.RGBColor( 0, 255, 255)
Teal = base.RGBColor( 0, 128, 128)
Blue = base.RGBColor( 0, 0, 255)
Navy = base.RGBColor( 0, 0, 128)
Fuchsia = base.RGBColor(255, 0, 255)
Purple = base.RGBColor(128, 0, 128)
|
mit
| 1,404,955,823,179,742,200
| 28.324324
| 79
| 0.584332
| false
| 2.855263
| false
| false
| false
|
Sumith1896/sympy
|
sympy/matrices/expressions/matadd.py
|
1
|
3161
|
from __future__ import print_function, division
from sympy.core.compatibility import reduce
from operator import add
from sympy.core import Add, Basic, sympify
from sympy.functions import adjoint
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.expressions.transpose import transpose
from sympy.strategies import (rm_id, unpack, flatten, sort, condition,
exhaust, do_one, glom)
from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError, ZeroMatrix
from sympy.utilities import default_sort_key, sift
class MatAdd(MatrixExpr):
"""A Sum of Matrix Expressions
MatAdd inherits from and operates like SymPy Add
>>> from sympy import MatAdd, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> C = MatrixSymbol('C', 5, 5)
>>> MatAdd(A, B, C)
A + B + C
"""
is_MatAdd = True
def __new__(cls, *args, **kwargs):
args = list(map(sympify, args))
check = kwargs.get('check', True)
obj = Basic.__new__(cls, *args)
if check:
validate(*args)
return obj
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j):
return Add(*[arg._entry(i, j) for arg in self.args])
def _eval_transpose(self):
return MatAdd(*[transpose(arg) for arg in self.args]).doit()
def _eval_adjoint(self):
return MatAdd(*[adjoint(arg) for arg in self.args]).doit()
def _eval_trace(self):
from trace import Trace
return MatAdd(*[Trace(arg) for arg in self.args]).doit()
def doit(self, **ignored):
return canonicalize(self)
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned"%(A, B))
factor_of = lambda arg: arg.as_coeff_mmul()[0]
matrix_of = lambda arg: unpack(arg.as_coeff_mmul()[1])
def combine(cnt, mat):
if cnt == 1:
return mat
else:
return cnt * mat
def merge_explicit(matadd):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, eye, Matrix, MatAdd, pprint
>>> from sympy.matrices.expressions.matadd import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = eye(2)
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatAdd(A, B, C)
>>> pprint(X)
A + [1 0] + [1 2]
[ ] [ ]
[0 1] [3 4]
>>> pprint(merge_explicit(X))
A + [2 2]
[ ]
[3 5]
"""
groups = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase))
if len(groups[True]) > 1:
return MatAdd(*(groups[False] + [reduce(add, groups[True])]))
else:
return matadd
rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)),
unpack,
flatten,
glom(matrix_of, factor_of, combine),
merge_explicit,
sort(default_sort_key))
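# canonicalize repeatedly rewrites a MatAdd with the rules above until it
# stops changing; non-MatAdd expressions are returned untouched.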
canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd),
do_one(*rules)))
|
bsd-3-clause
| -7,329,370,434,409,728,000
| 27.736364
| 81
| 0.595698
| false
| 3.508324
| false
| false
| false
|
robwebset/script.pinsentry
|
resources/lib/database.py
|
1
|
18767
|
# -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
import xbmcvfs
import sqlite3
import xbmcgui
# Import the common settings
from settings import log
from settings import os_path_join
ADDON = xbmcaddon.Addon(id='script.pinsentry')
#################################
# Class to handle database access
#################################
class PinSentryDB():
def __init__(self):
# Start by getting the database location
self.configPath = xbmc.translatePath(ADDON.getAddonInfo('profile'))
self.databasefile = os_path_join(self.configPath, "pinsentry_database.db")
log("PinSentryDB: Database file location = %s" % self.databasefile)
# Check to make sure the DB has been created
self._createDatabase()
# Removes the database if it exists
def cleanDatabase(self):
msg = "%s%s" % (ADDON.getLocalizedString(32113), "?")
isYes = xbmcgui.Dialog().yesno(ADDON.getLocalizedString(32001), msg)
if isYes:
# If the database file exists, delete it
if xbmcvfs.exists(self.databasefile):
xbmcvfs.delete(self.databasefile)
log("PinSentryDB: Removed database: %s" % self.databasefile)
else:
log("PinSentryDB: No database exists: %s" % self.databasefile)
# Creates the database if the file does not already exist
def _createDatabase(self):
# Make sure the database does not already exist
if not xbmcvfs.exists(self.databasefile):
# Get a connection to the database, this will create the file
conn = sqlite3.connect(self.databasefile)
conn.text_factory = str
c = conn.cursor()
# Create the version number table, this is a simple table
# that just holds the version details of what created it
# It should make upgrade later easier
c.execute('''CREATE TABLE version (version text primary key)''')
# Insert a row for the version
versionNum = "6"
# Run the statement passing in an array with one value
c.execute("INSERT INTO version VALUES (?)", (versionNum,))
# Create a table that will be used to store each Video and its access level
# The "id" will be auto-generated as the primary key
# Note: Index will automatically be created for "unique" values, so no
# need to manually create them
c.execute('''CREATE TABLE TvShows (id integer primary key, name text unique, dbid integer unique, level integer)''')
c.execute('''CREATE TABLE Movies (id integer primary key, name text unique, dbid integer unique, level integer)''')
c.execute('''CREATE TABLE MovieSets (id integer primary key, name text unique, dbid integer unique, level integer)''')
c.execute('''CREATE TABLE Plugins (id integer primary key, name text unique, dbid text unique, level integer)''')
c.execute('''CREATE TABLE Repositories (id integer primary key, name text unique, dbid text unique, level integer)''')
# This is in version 2
c.execute('''CREATE TABLE MusicVideos (id integer primary key, name text unique, dbid integer unique, level integer)''')
# This is in version 3
c.execute('''CREATE TABLE FileSources (id integer primary key, name text unique, dbid text unique, level integer)''')
# This is in version 4
c.execute('''CREATE TABLE ClassificationsMovies (id integer primary key, name text unique, dbid text, level integer)''')
c.execute('''CREATE TABLE ClassificationsTV (id integer primary key, name text unique, dbid text, level integer)''')
# This is in version 6
c.execute('''CREATE TABLE TvChannels (id integer primary key, name text unique, dbid integer unique, level integer)''')
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
# Creates or DB if it does not exist, or updates it if it does already exist
def createOrUpdateDB(self):
if not xbmcvfs.exists(self.databasefile):
# No database created yet - nothing to do
self._createDatabase()
return
# The database was already created, check to see if they need to be updated
# Check if this is an upgrade
conn = sqlite3.connect(self.databasefile)
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT * FROM version')
currentVersion = int(c.fetchone()[0])
log("PinSentryDB: Current version number in DB is: %d" % currentVersion)
# If the database is at version one, add the version 2 tables
if currentVersion < 2:
log("PinSentryDB: Updating to version 2")
# Add the tables that were added in version 2
c.execute('''CREATE TABLE MusicVideos (id integer primary key, name text unique, dbid integer unique, level integer)''')
# Update the new version of the database
currentVersion = 2
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version two, add the version 3 tables
if currentVersion < 3:
log("PinSentryDB: Updating to version 3")
# Add the tables that were added in version 3
c.execute('''CREATE TABLE FileSources (id integer primary key, name text unique, dbid text unique, level integer)''')
# Update the new version of the database
currentVersion = 3
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version three, add the version 4 tables
if currentVersion < 4:
log("PinSentryDB: Updating to version 4")
# Add the tables that were added in version 4
c.execute('''CREATE TABLE ClassificationsMovies (id integer primary key, name text unique, dbid text, level integer)''')
c.execute('''CREATE TABLE ClassificationsTV (id integer primary key, name text unique, dbid text, level integer)''')
# Update the new version of the database
currentVersion = 4
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version four, add the version 5 tables
if currentVersion < 5:
log("PinSentryDB: Updating to version 5")
# Add the tables that were added in version 5
c.execute('''CREATE TABLE Repositories (id integer primary key, name text unique, dbid text unique, level integer)''')
# Update the new version of the database
currentVersion = 5
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version five, add the version 6 tables
if currentVersion < 6:
log("PinSentryDB: Updating to version 6")
# Add the tables that were added in version 6
c.execute('''CREATE TABLE TvChannels (id integer primary key, name text unique, dbid integer unique, level integer)''')
# Update the new version of the database
currentVersion = 6
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
conn.close()
# Get a connection to the current database
def getConnection(self):
conn = sqlite3.connect(self.databasefile)
conn.text_factory = str
return conn
# Set the security value for a given TvShow
def setTvShowSecurityLevel(self, showName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("TvShows", showName, dbid, level)
else:
self._deleteSecurityDetails("TvShows", showName)
return ret
# Set the security value for a given Movie
def setMovieSecurityLevel(self, movieName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("Movies", movieName, dbid, level)
else:
self._deleteSecurityDetails("Movies", movieName)
return ret
# Set the security value for a given Movie Set
def setMovieSetSecurityLevel(self, movieSetName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("MovieSets", movieSetName, dbid, level)
else:
self._deleteSecurityDetails("MovieSets", movieSetName)
return ret
# Set the security value for a given Plugin
def setPluginSecurityLevel(self, pluginName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("Plugins", pluginName, dbid, level)
else:
self._deleteSecurityDetails("Plugins", pluginName)
return ret
# Set the security value for a given Repository
def setRepositorySecurityLevel(self, repoName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("Repositories", repoName, dbid, level)
else:
self._deleteSecurityDetails("Repositories", repoName)
return ret
# Set the security value for a given Music Video
def setMusicVideoSecurityLevel(self, musicVideoName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("MusicVideos", musicVideoName, dbid, level)
else:
self._deleteSecurityDetails("MusicVideos", musicVideoName)
return ret
# Set the security value for a given File Source
def setFileSourceSecurityLevel(self, sourceName, sourcePath, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("FileSources", sourceName, sourcePath, level)
else:
self._deleteSecurityDetails("FileSources", sourceName)
return ret
# Set the security value for a given Movie Classification
def setMovieClassificationSecurityLevel(self, id, match, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("ClassificationsMovies", id, match, level)
else:
self._deleteSecurityDetails("ClassificationsMovies", id)
return ret
# Set the security value for a given TV Classification
def setTvClassificationSecurityLevel(self, id, match, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("ClassificationsTV", id, match, level)
else:
self._deleteSecurityDetails("ClassificationsTV", id)
return ret
# Set the security value for a given TV Channel
def setTvChannelSecurityLevel(self, channelName, id, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("TvChannels", channelName, id, level)
else:
self._deleteSecurityDetails("TvChannels", channelName)
return ret
# Insert or replace an entry in the database
def _insertOrUpdate(self, tableName, name, dbid, level=1):
log("PinSentryDB: Adding %s %s (id:%s) at level %d" % (tableName, name, str(dbid), level))
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
insertData = (name, dbid, level)
cmd = 'INSERT OR REPLACE INTO %s (name, dbid, level) VALUES (?,?,?)' % tableName
c.execute(cmd, insertData)
rowId = c.lastrowid
conn.commit()
conn.close()
return rowId
# Delete an entry from the database
def _deleteSecurityDetails(self, tableName, name):
log("PinSentryDB: delete %s for %s" % (tableName, name))
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
# Delete any existing data from the database
cmd = 'DELETE FROM %s where name = ?' % tableName
c.execute(cmd, (name,))
conn.commit()
log("PinSentryDB: delete for %s removed %d rows" % (name, conn.total_changes))
conn.close()
# Get the security value for a given TvShow
def getTvShowSecurityLevel(self, showName):
return self._getSecurityLevel("TvShows", showName)
# Get the security value for a given Movie
def getMovieSecurityLevel(self, movieName):
return self._getSecurityLevel("Movies", movieName)
# Get the security value for a given Movie Set
def getMovieSetSecurityLevel(self, movieSetName):
return self._getSecurityLevel("MovieSets", movieSetName)
# Get the security value for a given Plugin
def getPluginSecurityLevel(self, pluginName):
return self._getSecurityLevel("Plugins", pluginName)
# Get the security value for a given Repository
def getRepositorySecurityLevel(self, pluginName):
return self._getSecurityLevel("Repositories", pluginName)
# Get the security value for a given Music Video
def getMusicVideoSecurityLevel(self, musicVideoName):
return self._getSecurityLevel("MusicVideos", musicVideoName)
# Get the security value for a given File Source
def getFileSourceSecurityLevel(self, sourceName):
return self._getSecurityLevel("FileSources", sourceName)
# Select the security entry from the database for a given File Source Path
def getFileSourceSecurityLevelForPath(self, path):
return self._getSecurityLevel("FileSources", path, 'dbid')
# Get the security value for a given Movie Classification
def getMovieClassificationSecurityLevel(self, className):
return self._getSecurityLevel("ClassificationsMovies", className, 'dbid')
# Get the security value for a given TV Classification
def getTvClassificationSecurityLevel(self, className):
return self._getSecurityLevel("ClassificationsTV", className, 'dbid')
# Get the security value for a given TV Channel
def getTvChannelsSecurityLevel(self, channelName):
return self._getSecurityLevel("TvChannels", channelName)
# Select the security entry from the database
def _getSecurityLevel(self, tableName, name, dbField='name'):
log("PinSentryDB: select %s for %s (dbField=%s)" % (tableName, name, dbField))
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
# Select any existing data from the database
cmd = 'SELECT * FROM %s where %s = ?' % (tableName, dbField)
c.execute(cmd, (name,))
row = c.fetchone()
securityLevel = 0
if row is None:
log("PinSentryDB: No entry found in the database for %s" % name)
# Not stored in the database so return 0 for no pin required
else:
log("PinSentryDB: Database info: %s" % str(row))
# Return will contain
# row[0] - Unique Index in the DB
# row[1] - Name of the TvShow/Movie/MovieSet
# row[2] - dbid
# row[3] - Security Level
securityLevel = row[3]
conn.close()
return securityLevel
# Select all TvShow entries from the database
def getAllTvShowsSecurity(self):
return self._getAllSecurityDetails("TvShows")
# Select all Movie entries from the database
def getAllMoviesSecurity(self):
return self._getAllSecurityDetails("Movies")
# Select all Movie Set entries from the database
def getAllMovieSetsSecurity(self):
return self._getAllSecurityDetails("MovieSets")
# Select all Plugin entries from the database
def getAllPluginsSecurity(self):
return self._getAllSecurityDetails("Plugins")
# Select all Plugin entries from the database
def getAllRepositoriesSecurity(self):
return self._getAllSecurityDetails("Repositories")
# Select all Music Video entries from the database
def getAllMusicVideosSecurity(self):
return self._getAllSecurityDetails("MusicVideos")
# Select all File Sources entries from the database
def getAllFileSourcesSecurity(self):
return self._getAllSecurityDetails("FileSources")
# Get All File Source Paths entries from the database
def getAllFileSourcesPathsSecurity(self):
# The path is stored in the ID column, so use that as the key
return self._getAllSecurityDetails("FileSources", keyCol=2)
# Get All Movie Classification entries from the database
def getAllMovieClassificationSecurity(self, useCertKey=False):
keyCol = 1
if useCertKey:
keyCol = 2
return self._getAllSecurityDetails("ClassificationsMovies", keyCol)
# Get All TV Classification entries from the database
def getAllTvClassificationSecurity(self, useCertKey=False):
keyCol = 1
if useCertKey:
keyCol = 2
return self._getAllSecurityDetails("ClassificationsTV", keyCol)
# Get All File Source Paths entries from the database
def getAllTvChannelsSecurity(self):
# The path is stored in the ID column, so use that as the key
return self._getAllSecurityDetails("TvChannels")
# Select all security details from a given table in the database
def _getAllSecurityDetails(self, tableName, keyCol=1):
log("PinSentryDB: select all %s" % tableName)
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
# Select any existing data from the database
cmd = 'SELECT * FROM %s' % tableName
c.execute(cmd)
rows = c.fetchall()
resultDict = {}
if rows is None:
# No data
log("PinSentryDB: No entry found in TvShow database")
else:
log("PinSentryDB: Database info: %s" % str(rows))
# Return will contain
# row[0] - Unique Index in the DB
# row[1] - Name of the TvShow/Movie/MovieSet
# row[2] - dbid
# row[3] - Security Level
for row in rows:
name = row[keyCol]
resultDict[name] = row[3]
conn.close()
return resultDict
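    # Hedged usage sketch (not part of the original module): illustrates how the
    # read methods above are expected to be called; the path below is an
    # illustrative assumption only.
    def _exampleReadSecurityLevels(self, moviePath='/media/films'):
        # Dictionary of {movie name: security level} for every stored movie
        allMovies = self.getAllMoviesSecurity()
        # Single security level (0 means no pin required) for one file source path
        pathLevel = self.getFileSourceSecurityLevelForPath(moviePath)
        return allMovies, pathLevel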
|
gpl-2.0
| 6,275,682,289,483,418,000
| 40.519912
| 132
| 0.6326
| false
| 4.349247
| false
| false
| false
|
josephsuh/extra-specs
|
nova/tests/api/openstack/compute/contrib/test_keypairs.py
|
1
|
11623
|
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
QUOTAS = quota.QUOTAS
def fake_keypair(name):
return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
pass
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_get(context, user_id, name):
pass
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list(self):
req = webob.Request.blank('/v2/fake/os-keypairs')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_invalid_name(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {'keypair': {'name': 'foo'}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
        # FIXME(ja): should we check that public_key was sent to create?
res_dict = json.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertFalse('private_key' in res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_get", db_key_pair_get)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeyPairNotFound()
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(fakes.wsgi_app())
print res
self.assertEqual(res.status_int, 404)
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertTrue(child.tag in exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertTrue(child.tag in kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
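# Hedged note (assumptions about the local environment, not part of the original
# file): these cases follow the standard unittest pattern used across the nova
# tree, so a generic runner such as nose can execute them directly, e.g.:
#   nosetests nova/tests/api/openstack/compute/contrib/test_keypairs.py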
|
apache-2.0
| 210,862,576,793,496,740
| 35.550314
| 78
| 0.580573
| false
| 3.445894
| true
| false
| false
|
eevee/flax
|
flax/ui/console/util.py
|
1
|
3170
|
"""Utility widgets, not really specific to the game."""
import sys
import urwid
class LogWidget(urwid.ListBox):
# Can't receive focus on its own; assumed that some parent widget will
# worry about scrolling us
_selectable = False
def __init__(self):
super().__init__(urwid.SimpleListWalker([]))
def add_log_line(self, line):
text = urwid.Text(('log-game', line))
self.body.append(text)
self.focus_position = len(self.body) - 1
class ToggleableOverlay(urwid.Overlay):
"""An Overlay where the top widget can be swapped out or hidden entirely.
If the top widget is removed, focus passes to the bottom widget.
"""
def __init__(self, bottom_w):
super().__init__(
None, bottom_w,
# These get replaced every time; just need some sane defaults
align='center', valign='middle', height='pack', width='pack',
)
def selectable(self):
return self.focus.selectable()
def keypress(self, size, key):
if self.top_w:
return super().keypress(size, key)
else:
return self.bottom_w.keypress(size, key)
@property
def focus(self):
if self.top_w:
return self.top_w
else:
return self.bottom_w
@property
def focus_position(self):
if self.top_w:
return 1
else:
return 0
@focus_position.setter
def focus_position(self, position):
if position == 0:
self.top_w = None
else:
super().focus_position = position
# TODO override `contents` to return a 1-element thing
def render(self, size, focus=False):
if self.top_w:
return super().render(size, focus)
else:
return self.bottom_w.render(size, focus)
### New APIs
def _close_handler(self, widget, *args):
urwid.disconnect_signal(widget, 'close-overlay', self._close_handler)
if self._onclose:
self._onclose(*args)
self.change_overlay(None)
_onclose = None
def change_overlay(self, widget, onclose=None, **kwargs):
self._onclose = onclose
if widget:
urwid.disconnect_signal(widget, 'close-overlay', self._close_handler)
urwid.connect_signal(widget, 'close-overlay', self._close_handler)
if 'box' in widget.sizing():
# A box is probably a popup, so center it
defaults = dict(
align='center',
valign='middle',
width=('relative', 90),
height=('relative', 90),
)
else:
# Otherwise it's probably a prompt or something, so stick it at
# the bottom
defaults = dict(
align='left',
valign='bottom',
width=('relative', 100),
height='pack',
)
defaults.update(kwargs)
self.set_overlay_parameters(**defaults)
self.top_w = widget
self._invalidate()
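# Hedged usage sketch (not part of the original module): shows the intended
# change_overlay() flow; 'popup' is assumed to be a box widget that emits the
# 'close-overlay' signal this class listens for.
def _example_show_popup(overlay, popup, on_done=None):
    # Centers the popup over the bottom widget; when the popup emits
    # 'close-overlay', the overlay removes it again and calls on_done (if given).
    overlay.change_overlay(popup, onclose=on_done)
def _example_hide_popup(overlay):
    # Passing None removes the top widget and returns focus to the bottom one.
    overlay.change_overlay(None)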
|
mit
| -6,422,756,433,398,865,000
| 27.558559
| 81
| 0.543218
| false
| 4.324693
| false
| false
| false
|
Kismuz/btgym
|
btgym/research/strategy_gen_5/base.py
|
1
|
37905
|
###############################################################################
#
# Copyright (C) 2017-2018 Andrew Muzikin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import backtrader as bt
import backtrader.indicators as btind
from gym import spaces
from btgym import DictSpace
import numpy as np
from collections import deque
from btgym.strategy.utils import norm_value, decayed_result, exp_scale
############################## Base BTgymStrategy Class ###################
class BaseStrategy5(bt.Strategy):
"""
    'New and improved' base strategy class.
Incorporates state declaration and preprocessing improvements.
    Current candidate to replace the existing BTgymBaseStrategy.
Controls Environment inner dynamics and backtesting logic. Provides gym'my (State, Action, Reward, Done, Info) data.
Any State, Reward and Info computation logic can be implemented by subclassing BTgymStrategy and overriding
get_[mode]_state(), get_reward(), get_info(), is_done() and set_datalines() methods.
    One can always go deeper and override __init__() and next() methods for desired
server cerebro engine behaviour, including order execution logic etc.
Note:
        - base class supports single asset iteration via default data_line named 'base_asset'; see derived classes
          for multi-asset support
- bt.observers.DrawDown observer will be automatically added to BTgymStrategy instance at runtime.
- Since it is bt.Strategy subclass, refer to https://www.backtrader.com/docu/strategy.html for more information.
"""
# Time embedding period:
time_dim = 4 # NOTE: changed this --> change Policy UNREAL for aux. pix control task upsampling params
# Number of timesteps reward estimation statistics are averaged over, should be:
# skip_frame_period <= avg_period <= time_embedding_period:
avg_period = time_dim
# Possible agent actions; Note: place 'hold' first! :
portfolio_actions = ('hold', 'buy', 'sell', 'close')
features_parameters = ()
num_features = len(features_parameters)
params = dict(
# Observation state shape is dictionary of Gym spaces,
# at least should contain `raw_state` field.
# By convention first dimension of every Gym Box space is time embedding one;
# one can define any shape; should match env.observation_space.shape.
# observation space state min/max values,
# For `raw_state' (default) - absolute min/max values from BTgymDataset will be used.
state_shape={
'raw': spaces.Box(
shape=(time_dim, 4),
low=0, # will get overridden.
high=0,
dtype=np.float32,
),
'metadata': DictSpace(
{
'type': spaces.Box(
shape=(),
low=0,
high=1,
dtype=np.uint32
),
'trial_num': spaces.Box(
shape=(),
low=0,
high=10 ** 10,
dtype=np.uint32
),
'trial_type': spaces.Box(
shape=(),
low=0,
high=1,
dtype=np.uint32
),
'sample_num': spaces.Box(
shape=(),
low=0,
high=10 ** 10,
dtype=np.uint32
),
'first_row': spaces.Box(
shape=(),
low=0,
high=10 ** 10,
dtype=np.uint32
),
'timestamp': spaces.Box(
shape=(),
low=0,
high=np.finfo(np.float64).max,
dtype=np.float64
),
}
)
},
cash_name='default_cash',
asset_names=['default_asset'],
start_cash=None,
commission=None,
slippage=None,
leverage=1.0,
gamma=0.99, # fi_gamma, should match MDP gamma decay
reward_scale=1, # reward multiplicator
        drawdown_call=10, # finish episode when hitting drawdown threshold, in percent.
target_call=10, # finish episode when reaching profit target, in percent.
dataset_stat=None, # Summary descriptive statistics for entire dataset and
episode_stat=None, # current episode. Got updated by server.
time_dim=time_dim, # time embedding period
avg_period=avg_period, # number of time steps reward estimation statistics are averaged over
features_parameters=features_parameters,
num_features=num_features,
metadata={},
broadcast_message={},
trial_stat=None,
trial_metadata=None,
portfolio_actions=portfolio_actions,
skip_frame=1, # number of environment steps to skip before returning next environment response
order_size=None,
initial_action=None,
initial_portfolio_action=None,
state_int_scale=1,
state_ext_scale=1,
)
def __init__(self, **kwargs):
"""
Keyword Args:
params (dict): parameters dictionary, see Note below.
Notes:
Due to backtrader convention, any strategy arguments should be defined inside `params` dictionary
or passed as kwargs to bt.Cerebro() class via .addstrategy() method. Parameter dictionary
should contain at least these keys::
state_shape: Observation state shape is dictionary of Gym spaces, by convention
first dimension of every Gym Box space is time embedding one;
cash_name: str, name for cash asset
asset_names: iterable of str, names for assets
start_cash: float, broker starting cash
commission: float, broker commission value, .01 stands for 1%
leverage: float, broker leverage
slippage: float, broker execution slippage
order_size: dict of fixed order stakes (floats); keys should match assets names.
                drawdown_call: finish episode when hitting this drawdown threshold, in percent.
target_call: finish episode when reaching this profit target, in percent.
portfolio_actions: possible agent actions.
skip_frame: number of environment steps to skip before returning next response,
e.g. if set to 10 -- agent will interact with environment every 10th step;
every other step agent action is assumed to be 'hold'.
Default values are::
state_shape=dict(raw_state=spaces.Box(shape=(4, 4), low=0, high=0,))
cash_name='default_cash'
asset_names=['default_asset']
start_cash=None
commission=None
slippage=None,
leverage=1.0
drawdown_call=10
target_call=10
dataset_stat=None
episode_stat=None
portfolio_actions=('hold', 'buy', 'sell', 'close')
skip_frame=1
order_size=None
"""
# Inherit logger from cerebro:
self.log = self.env._log
self.skip_frame = self.p.skip_frame
self.iteration = 0
self.env_iteration = 0
self.inner_embedding = 1
self.is_done = False
self.is_done_enabled = False
self.steps_till_is_done = 2 # extra steps to make when episode terminal conditions are met
self.action = self.p.initial_portfolio_action
self.action_to_repeat = self.p.initial_portfolio_action
self.action_repeated = 0
self.num_action_repeats = None
self.reward = 0
self.order = None
self.order_failed = 0
self.broker_message = '_'
self.final_message = '_'
self.raw_state = None
self.time_stamp = 0
# Configure state_shape:
if self.p.state_shape is None:
self.p.state_shape = self.set_state_shape()
# Prepare broker:
if self.p.start_cash is not None:
self.env.broker.setcash(self.p.start_cash)
if self.p.commission is not None:
self.env.broker.setcommission(commission=self.p.commission, leverage=self.p.leverage)
if self.p.slippage is not None:
# Bid/ask workaround: set overkill 10% slippage + slip_out=False
# ensuring we always buy at current 'high'~'ask' and sell at 'low'~'bid':
self.env.broker.set_slippage_perc(self.p.slippage, slip_open=True, slip_match=True, slip_out=False)
# Normalisation constant for statistics derived from account value:
self.broker_value_normalizer = 1 / \
self.env.broker.startingcash / (self.p.drawdown_call + self.p.target_call) * 100
self.target_value = self.env.broker.startingcash * (1 + self.p.target_call / 100)
# Try to define stake, if no self.p.order_size dict has been set:
if self.p.order_size is None:
# If no order size has been set for every data_line,
# try to infer stake size from sizer set by bt.Cerebro.addsizer() method:
try:
assert len(list(self.env.sizers.values())) == 1
env_sizer_params = list(self.env.sizers.values())[0][-1] # pull dict of outer set sizer params
assert 'stake' in env_sizer_params.keys()
except (AssertionError, KeyError) as e:
                msg = 'Order stake is set neither via strategy.param.order_size nor via bt.Cerebro.addsizer method.'
self.log.error(msg)
raise ValueError(msg)
self.p.order_size = {name: env_sizer_params['stake'] for name in self.p.asset_names}
elif isinstance(self.p.order_size, int) or isinstance(self.p.order_size, float):
unimodal_stake = {name: self.p.order_size for name in self.getdatanames()}
self.p.order_size = unimodal_stake
# self.log.warning('asset names: {}'.format(self.p.asset_names))
# self.log.warning('data names: {}'.format(self.getdatanames()))
self.trade_just_closed = False
self.trade_result = 0
self.unrealized_pnl = None
self.norm_broker_value = None
self.realized_pnl = None
self.current_pos_duration = 0
self.current_pos_min_value = 0
self.current_pos_max_value = 0
self.realized_broker_value = self.env.broker.startingcash
self.episode_result = 0 # not used
# Service sma to get correct first features values:
self.data.dim_sma = btind.SimpleMovingAverage(
self.datas[0],
period=self.p.time_dim
)
self.data.dim_sma.plotinfo.plot = False
# self.log.warning('self.p.dir: {}'.format(dir(self.params)))
# Episode-wide metadata:
self.metadata = {
'type': np.asarray(self.p.metadata['type']),
'trial_num': np.asarray(self.p.metadata['parent_sample_num']),
'trial_type': np.asarray(self.p.metadata['parent_sample_type']),
'sample_num': np.asarray(self.p.metadata['sample_num']),
'first_row': np.asarray(self.p.metadata['first_row']),
'timestamp': np.asarray(self.time_stamp, dtype=np.float64)
}
self.state = {
'raw': None,
'metadata': None
}
# If it is train or test episode?
# default logic: true iff. it is test episode from target domain:
self.is_test = self.metadata['type'] and self.metadata['trial_type']
# This flag shows to the outer world if this episode can broadcast world-state information, e.g. move global
# time forward (see: btgym.server._BTgymAnalyzer.next() method);
self.can_broadcast = self.is_test
self.log.debug('strategy.metadata: {}'.format(self.metadata))
self.log.debug('is_test: {}'.format(self.is_test))
        # Broker data lines of interest (used for estimating the inner state of the agent):
self.broker_datalines = [
'cash',
'value',
'exposure',
'drawdown',
'pos_duration',
'realized_pnl',
'unrealized_pnl',
'min_unrealized_pnl',
'max_unrealized_pnl',
'total_unrealized_pnl',
]
        # Define flat collection dictionary looking up methods for estimating broker statistics,
        # one method per mode, named .get_broker_[mode_name]():
self.collection_get_broker_stat_methods = {}
for line in self.broker_datalines:
try:
self.collection_get_broker_stat_methods[line] = getattr(self, 'get_broker_{}'.format(line))
except AttributeError:
raise NotImplementedError('Callable get_broker_{}.() not found'.format(line))
        # Broker and account related sliding statistics accumulators, globally normalized last `avg_period` values,
# so it's a bit more computationally efficient than use of bt.Observers:
self.broker_stat = {key: deque(maxlen=self.avg_period) for key in self.broker_datalines}
# Add custom data Lines if any (convenience wrapper):
self.set_datalines()
self.log.debug('Kwargs:\n{}\n'.format(str(kwargs)))
        # Define flat collection dictionary looking up methods for estimating observation state,
        # one method per mode, named .get_[mode_name]_state():
self.collection_get_state_methods = {}
for key in self.p.state_shape.keys():
try:
self.collection_get_state_methods[key] = getattr(self, 'get_{}_state'.format(key))
except AttributeError:
raise NotImplementedError('Callable get_{}_state.() not found'.format(key))
for data in self.datas:
self.log.debug('data_name: {}'.format(data._name))
self.log.debug('stake size: {}'.format(self.p.order_size))
# Define how this strategy should handle actions: either as discrete or continuous:
if self.p.portfolio_actions is None or set(self.p.portfolio_actions) == {}:
# No discrete actions provided, assume continuous:
try:
assert self.p.skip_frame > 1
except AssertionError:
msg = 'For continuous actions it is essential to set `skip_frame` parameter > 1, got: {}'.format(
self.p.skip_frame
)
self.log.error(msg)
raise ValueError(msg)
# Disable broker checking margin,
# see: https://community.backtrader.com/topic/152/multi-asset-ranking-and-rebalancing/2?page=1
self.env.broker.set_checksubmit(False)
self.next_process_fn = self._next_target_percent
# Repeat action 2 times:
self.num_action_repeats = 2
else:
# Use discrete handling method otherwise:
self.env.broker.set_checksubmit(True)
self.next_process_fn = self._next_discrete
# self.log.warning('DISCRETE')
# Do not repeat action for discrete:
self.num_action_repeats = 0
def prenext(self):
self.update_broker_stat()
def nextstart(self):
self.inner_embedding = self.data.close.buflen()
self.log.debug('Inner time embedding: {}'.format(self.inner_embedding))
def next(self):
"""
Default implementation for built-in backtrader method.
Defines one step environment routine;
Handles order execution logic according to action received.
Note that orders can only be submitted for data_lines in action_space (assets).
`self.action` attr. is updated by btgym.server._BTgymAnalyzer, and `None` actions
are emitted while doing `skip_frame` loop.
"""
self.update_broker_stat()
if '_skip_this' in self.action.keys():
# print('a_skip, b_message: ', self.broker_message)
if self.action_repeated < self.num_action_repeats:
self.next_process_fn(self.action_to_repeat)
self.action_repeated += 1
else:
self.next_process_fn(self.action)
self.action_repeated = 0
self.action_to_repeat = self.action
# print('a_process, b_message: ', self.broker_message)
def notify_trade(self, trade):
if trade.isclosed:
            # Set trade flags: True if a trade has been closed just now, within the last frame-skip period,
# and store trade result:
self.trade_just_closed = True
# Note: `trade_just_closed` flag has to be reset manually after evaluating.
self.trade_result += trade.pnlcomm
            # Store realized portfolio value:
self.realized_broker_value = self.broker.get_value()
# self.log.warning('notify_trade: trade_pnl: {}, cum_trade_result: {}, realized_value: {}'.format(
# trade.pnlcomm, self.trade_result, self.realized_broker_value)
# )
def update_broker_stat(self):
"""
Updates all sliding broker statistics deques with latest-step values such as:
- normalized broker value
- normalized broker cash
- normalized exposure (position size)
- exp. scaled episode duration in steps, normalized wrt. max possible episode steps
- normalized realized profit/loss for last closed trade (is zero if no pos. closures within last env. step)
- normalized profit/loss for current opened trade (unrealized p/l);
"""
# Current account value:
current_value = self.env.broker.get_value()
# Individual positions for each instrument traded:
positions = [self.env.broker.getposition(data) for data in self.datas]
exposure = sum([abs(pos.size) for pos in positions])
for key, method in self.collection_get_broker_stat_methods.items():
self.broker_stat[key].append(
method(
current_value=current_value,
positions=positions,
exposure=exposure,
)
)
# Reset one-time flags:
self.trade_just_closed = False
self.trade_result = 0
def get_broker_value(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized broker value.
"""
return norm_value(
current_value,
self.env.broker.startingcash,
self.p.drawdown_call,
self.p.target_call,
)
def get_broker_cash(self, **kwargs):
"""
Returns:
normalized broker cash
"""
return norm_value(
self.env.broker.get_cash(),
self.env.broker.startingcash,
99.0,
self.p.target_call,
)
def get_broker_exposure(self, exposure, **kwargs):
"""
Returns:
normalized exposure (position size)
"""
return exposure / (self.env.broker.startingcash * self.env.broker.get_leverage() + 1e-2)
def get_broker_realized_pnl(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized realized profit/loss for last closed trade (is zero if no pos. closures within last env. step)
"""
if self.trade_just_closed:
pnl = decayed_result(
self.trade_result,
current_value,
self.env.broker.startingcash,
self.p.drawdown_call,
self.p.target_call,
gamma=1
)
# self.log.warning('get_broker_realized_pnl: got result: {} --> pnl: {}'.format(self.trade_result, pnl))
# Reset flag:
# self.trade_just_closed = False
# print('broker_realized_pnl: step {}, just closed.'.format(self.iteration))
else:
pnl = 0.0
return pnl
def get_broker_unrealized_pnl(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized profit/loss for current opened trade
"""
return (current_value - self.realized_broker_value) * self.broker_value_normalizer
def get_broker_total_unrealized_pnl(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized profit/loss wrt. initial portfolio value
"""
return (current_value - self.env.broker.startingcash) * self.broker_value_normalizer
def get_broker_episode_step(self, **kwargs):
"""
Returns:
exp. scaled episode duration in steps, normalized wrt. max possible episode steps
"""
return exp_scale(
self.iteration / (self.data.numrecords - self.inner_embedding),
gamma=3
)
def get_broker_drawdown(self, **kwargs):
"""
Returns:
current drawdown value
"""
try:
dd = self.stats.drawdown.drawdown[-1] # / self.p.drawdown_call
except IndexError:
dd = 0.0
return dd
def get_broker_pos_duration(self, exposure, **kwargs):
if exposure == 0:
self.current_pos_duration = 0
# print('ZERO_POSITION\n')
else:
self.current_pos_duration += 1
return self.current_pos_duration
def get_broker_max_unrealized_pnl(self, current_value, exposure, **kwargs):
if exposure == 0:
self.current_pos_max_value = current_value
else:
if self.current_pos_max_value < current_value:
self.current_pos_max_value = current_value
return (self.current_pos_max_value - self.realized_broker_value) * self.broker_value_normalizer
def get_broker_min_unrealized_pnl(self, current_value, exposure, **kwargs):
if exposure == 0:
self.current_pos_min_value = current_value
else:
if self.current_pos_min_value > current_value:
self.current_pos_min_value = current_value
return (self.current_pos_min_value - self.realized_broker_value) * self.broker_value_normalizer
def set_datalines(self):
"""
Default datalines are: Open, Low, High, Close, Volume.
Any other custom data lines, indicators, etc. should be explicitly defined by overriding this method.
Invoked once by Strategy.__init__().
"""
pass
def get_raw_state(self):
"""
Default state observation composer.
Returns:
and updates time-embedded environment state observation as [n,4] numpy matrix, where:
4 - number of signal features == state_shape[1],
n - time-embedding length == state_shape[0] == <set by user>.
Note:
`self.raw_state` is used to render environment `human` mode and should not be modified.
"""
self.raw_state = np.row_stack(
(
np.frombuffer(self.data.open.get(size=self.time_dim)),
np.frombuffer(self.data.high.get(size=self.time_dim)),
np.frombuffer(self.data.low.get(size=self.time_dim)),
np.frombuffer(self.data.close.get(size=self.time_dim)),
)
).T
return self.raw_state
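    # Shape illustration for get_raw_state() (values are exposition-only
    # assumptions): with the default time_dim = 4 the returned matrix has
    # 4 rows (time steps) and 4 columns (open, high, low, close), matching
    # state_shape['raw'] declared in params above.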
def get_internal_state(self):
"""
Composes internal state tensor by calling all statistics from broker_stat dictionary.
Generally, this method should not be modified, implement corresponding get_broker_[mode]() methods.
"""
x_broker = np.concatenate(
[np.asarray(stat)[..., None] for stat in self.broker_stat.values()],
axis=-1
)
return x_broker[:, None, :]
def get_metadata_state(self):
self.metadata['timestamp'] = np.asarray(self._get_timestamp())
return self.metadata
def _get_time(self):
"""
Retrieves current time point of the episode data.
Returns:
datetime object
"""
return self.data.datetime.datetime()
def _get_timestamp(self):
"""
Sets attr. and returns current data timestamp.
Returns:
POSIX timestamp
"""
self.time_stamp = self._get_time().timestamp()
return self.time_stamp
def _get_broadcast_info(self):
"""
Transmits broadcasting message.
Returns:
dictionary or None
"""
try:
return self.get_broadcast_message()
except AttributeError:
return None
def get_broadcast_message(self):
"""
Override this.
Returns:
dictionary or None
"""
return None
def get_state(self):
"""
Collects estimated values for every mode of observation space by calling methods from
`collection_get_state_methods` dictionary.
        As a rule, this method should not be modified; override or implement corresponding get_[mode]_state() methods,
        defining necessary calculations, and return arbitrary shaped tensors for every space mode.
Note:
            - 'data' refers to bt.strategy datafeeds and should be treated as such.
Datafeed Lines that are not default to BTgymStrategy should be explicitly defined by
            __init__() or set_datalines().
"""
# Update inner state statistic and compose state: <- moved to .next()
# self.update_broker_stat()
self.state = {key: method() for key, method in self.collection_get_state_methods.items()}
return self.state
def get_reward(self):
"""
Shapes reward function as normalized single trade realized profit/loss,
augmented with potential-based reward shaping functions in form of:
F(s, a, s`) = gamma * FI(s`) - FI(s);
Potential FI_1 is current normalized unrealized profit/loss.
Paper:
"Policy invariance under reward transformations:
Theory and application to reward shaping" by A. Ng et al., 1999;
http://www.robotics.stanford.edu/~ang/papers/shaping-icml99.pdf
"""
# All sliding statistics for this step are already updated by get_state().
# Potential-based shaping function 1:
# based on potential of averaged profit/loss for current opened trade (unrealized p/l):
unrealised_pnl = np.asarray(self.broker_stat['unrealized_pnl'])
current_pos_duration = self.broker_stat['pos_duration'][-1]
        # We want to estimate the shaping term `f = gamma*fi_prime - fi` for the current opened position,
        # thus need to consider different cases given the skip_frame parameter:
if current_pos_duration == 0:
# Set potential term to zero if there is no opened positions:
f1 = 0
fi_1_prime = 0
else:
if current_pos_duration < self.p.skip_frame:
fi_1 = 0
fi_1_prime = np.average(unrealised_pnl[-current_pos_duration:])
elif current_pos_duration < 2 * self.p.skip_frame:
fi_1 = np.average(
unrealised_pnl[-(self.p.skip_frame + current_pos_duration):-self.p.skip_frame]
)
fi_1_prime = np.average(unrealised_pnl[-self.p.skip_frame:])
else:
fi_1 = np.average(
unrealised_pnl[-2 * self.p.skip_frame:-self.p.skip_frame]
)
fi_1_prime = np.average(unrealised_pnl[-self.p.skip_frame:])
# Potential term:
f1 = self.p.gamma * fi_1_prime - fi_1
# Main reward function: normalized realized profit/loss:
realized_pnl = np.asarray(self.broker_stat['realized_pnl'])[-self.p.skip_frame:].sum()
# Weights are subject to tune:
self.reward = (10.0 * f1 + 10.0 * realized_pnl) * self.p.reward_scale
self.reward = np.clip(self.reward, -self.p.reward_scale, self.p.reward_scale)
return self.reward
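    # Worked illustration of the shaping term above (numbers are exposition-only
    # assumptions): with gamma = 0.99, fi_1 = 0.010 (previous averaged unrealized
    # pnl) and fi_1_prime = 0.015 (current), f1 = 0.99 * 0.015 - 0.010 = 0.00485;
    # both f1 and the realized pnl term are then weighted by 10, multiplied by
    # reward_scale and clipped to [-reward_scale, reward_scale].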
def get_info(self):
"""
Composes information part of environment response,
can be any object. Override to own taste.
Note:
Due to 'skip_frame' feature, INFO part of environment response transmitted by server can be a list
containing either all skipped frame's info objects, i.e. [info[-9], info[-8], ..., info[0]] or
just latest one, [info[0]]. This behaviour is set inside btgym.server._BTgymAnalyzer().next() method.
"""
return dict(
step=self.iteration,
time=self.data.datetime.datetime(),
action=self.action,
broker_message=self.broker_message,
broker_cash=self.stats.broker.cash[0],
broker_value=self.stats.broker.value[0],
drawdown=self.stats.drawdown.drawdown[0],
max_drawdown=self.stats.drawdown.maxdrawdown[0],
)
def get_done(self):
"""
Episode termination estimator,
        defines any trading logic conditions upon which episode stop is called, e.g. <OMG! Stop it, we became too rich!>.
        It is just a structural convention method. Default method is empty.
Expected to return:
tuple (<is_done, type=bool>, <message, type=str>).
"""
return False, '-'
def _get_done(self):
"""
Default episode termination method,
        checks the base conditions upon which episode stop is called:
1. Reached maximum episode duration. Need to check it explicitly, because <self.is_done> flag
is sent as part of environment response.
2. Got '_done' signal from outside. E.g. via env.reset() method invoked by outer RL algorithm.
3. Hit drawdown threshold.
4. Hit target profit threshold.
This method shouldn't be overridden or called explicitly.
Runtime execution logic is:
terminate episode if:
get_done() returned (True, 'something')
OR
ANY _get_done() default condition is met.
"""
if not self.is_done_enabled:
# Episode is on its way,
# apply base episode termination rules:
is_done_rules = [
                # Are we approaching the end of the episode?:
(self.iteration >= \
self.data.numrecords - self.inner_embedding - self.p.skip_frame - self.steps_till_is_done,
'END OF DATA'),
# Any money left?:
(self.stats.drawdown.maxdrawdown[0] >= self.p.drawdown_call, 'DRAWDOWN CALL'),
# Party time?
(self.env.broker.get_value() > self.target_value, 'TARGET REACHED'),
]
# Append custom get_done() results, if any:
is_done_rules += [self.get_done()]
# Sweep through rules:
for (condition, message) in is_done_rules:
if condition:
# Start episode termination countdown for clean exit:
# to forcefully execute final `close` order and compute proper reward
# we need to make `steps_till_is_done` number of steps until `is_done` flag can be safely risen:
self.is_done_enabled = True
self.broker_message += message
self.final_message = message
self.order = self.close()
self.log.debug(
'Episode countdown started at: {}, {}, r:{}'.format(self.iteration, message, self.reward)
)
else:
# Now in episode termination phase,
# just keep hitting `Close` button:
self.steps_till_is_done -= 1
self.broker_message = 'CLOSE, {}'.format(self.final_message)
self.order = self.close()
self.log.debug(
'Episode countdown contd. at: {}, {}, r:{}'.format(self.iteration, self.broker_message, self.reward)
)
if self.steps_till_is_done <= 0:
# Now we've done, terminate:
self.is_done = True
return self.is_done
def notify_order(self, order):
"""
Shamelessly taken from backtrader tutorial.
TODO: better multi data support
"""
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.broker_message = 'BUY executed,\nPrice: {:.5f}, Cost: {:.4f}, Comm: {:.4f}'. \
format(order.executed.price,
order.executed.value,
order.executed.comm)
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.broker_message = 'SELL executed,\nPrice: {:.5f}, Cost: {:.4f}, Comm: {:.4f}'. \
format(order.executed.price,
order.executed.value,
order.executed.comm)
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.broker_message = 'ORDER FAILED with status: ' + str(order.getstatusname())
            # Raise order_failed flag until get_reward() [hopefully] uses and resets it:
self.order_failed += 1
# self.log.warning('BM: {}'.format(self.broker_message))
self.order = None
def _next_discrete(self, action):
"""
Default implementation for discrete actions.
Note that orders can be submitted only for data_lines in action_space (assets).
Args:
action: dict, string encoding of btgym.spaces.ActionDictSpace
"""
for key, single_action in action.items():
# Simple action-to-order logic:
if single_action == 'hold' or self.is_done_enabled:
pass
elif single_action == 'buy':
self.order = self.buy(data=key, size=self.p.order_size[key])
self.broker_message = 'new {}_BUY created; '.format(key) + self.broker_message
elif single_action == 'sell':
self.order = self.sell(data=key, size=self.p.order_size[key])
self.broker_message = 'new {}_SELL created; '.format(key) + self.broker_message
elif single_action == 'close':
self.order = self.close(data=key)
self.broker_message = 'new {}_CLOSE created; '.format(key) + self.broker_message
# Somewhere after this point, server-side _BTgymAnalyzer() is exchanging information with environment wrapper,
# obtaining <self.action> , composing and sending <state,reward,done,info> etc... never mind.
def _next_target_percent(self, action):
"""
Uses `order_target_percent` method to rebalance assets to given ratios. Expects action for every asset to be
        a float scalar in [0,1], with actions summing to 1 over all assets (including the base one).
Note that action for base asset (cash) is ignored.
For details refer to: https://www.backtrader.com/docu/order_target/order_target.html
"""
        # TODO 1: filter similar actions to prevent excessive orders issue, e.g. by DKL on two consecutive ones
        # TODO 2: action discretisation at the level of execution
for asset in self.p.asset_names:
            # Reducing asset positions, reserving a 10% margin (target scaled by 0.9):
single_action = round(float(action[asset]) * 0.9, 2)
self.order = self.order_target_percent(data=asset, target=single_action)
self.broker_message += ' new {}->{:1.0f}% created; '.format(asset, single_action * 100)
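# Hedged sketch of the subclassing pattern described in the class docstring (not
# part of the original module): custom datalines go into set_datalines() and extra
# observation modes get their own get_<mode>_state() method; the indicator period
# below is an illustrative assumption only.
class _ExampleStrategy(BaseStrategy5):
    def set_datalines(self):
        # Extra dataline is picked up by the base class because it is defined here:
        self.data.example_sma = btind.SimpleMovingAverage(self.datas[0], period=16)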
|
lgpl-3.0
| 1,417,447,790,621,117,000
| 39.889968
| 120
| 0.573407
| false
| 4.227166
| false
| false
| false
|
rbuffat/pyidf
|
tests/test_airflownetworkintrazonelinkage.py
|
1
|
1949
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkIntraZoneLinkage
log = logging.getLogger(__name__)
class TestAirflowNetworkIntraZoneLinkage(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airflownetworkintrazonelinkage(self):
pyidf.validation_level = ValidationLevel.error
obj = AirflowNetworkIntraZoneLinkage()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_node_1_name = "Node 1 Name"
obj.node_1_name = var_node_1_name
# alpha
var_node_2_name = "Node 2 Name"
obj.node_2_name = var_node_2_name
# object-list
var_component_name = "object-list|Component Name"
obj.component_name = var_component_name
# object-list
var_airflownetworkmultizonesurface_name = "object-list|AirflowNetwork:MultiZone:Surface Name"
obj.airflownetworkmultizonesurface_name = var_airflownetworkmultizonesurface_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].name, var_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].node_1_name, var_node_1_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].node_2_name, var_node_2_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].component_name, var_component_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].airflownetworkmultizonesurface_name, var_airflownetworkmultizonesurface_name)
|
apache-2.0
| 4,970,112,966,095,272,000
| 35.111111
| 142
| 0.691637
| false
| 3.524412
| false
| false
| false
|
adobe-research/spark-cluster-deployment
|
initial-deployment-puppet/modules/spark/files/spark/examples/src/main/python/wordcount.py
|
1
|
1306
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from operator import add
from pyspark import SparkContext
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: wordcount <file>"
exit(-1)
sc = SparkContext(appName="PythonWordCount")
lines = sc.textFile(sys.argv[1], 1)
counts = lines.flatMap(lambda x: x.split(' ')) \
.map(lambda x: (x, 1)) \
.reduceByKey(add)
output = counts.collect()
for (word, count) in output:
print "%s: %i" % (word, count)
|
apache-2.0
| 8,318,567,125,479,649,000
| 36.314286
| 74
| 0.687596
| false
| 3.933735
| false
| false
| false
|
vlfedotov/django-business-logic
|
business_logic/rest/serializers.py
|
1
|
9163
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.utils import six
from rest_framework import serializers
from ..models import (ExceptionLog, Execution, ExecutionArgument, ExecutionEnvironment, FunctionDefinition,
FunctionLibrary, LogEntry, Program, ProgramArgument, ProgramArgumentField, ProgramInterface,
ProgramVersion, ReferenceDescriptor, FunctionArgument, FunctionArgumentChoice)
from ..models.types_ import TYPES_FOR_DJANGO_FIELDS, DJANGO_FIELDS_FOR_TYPES
from ..blockly.build import BlocklyXmlBuilder
from ..blockly.create import NodeTreeCreator
from ..blockly.parse import BlocklyXmlParser
def get_model_name(content_type):
return '{}.{}'.format(content_type.app_label, content_type.model_class().__name__)
def get_model_verbose_name(content_type):
return content_type.model_class()._meta.verbose_name
class ContentTypeSerializer(serializers.Serializer):
name = serializers.SerializerMethodField()
verbose_name = serializers.SerializerMethodField()
id = serializers.IntegerField()
def get_verbose_name(self, obj):
return get_model_verbose_name(obj)
def get_name(self, obj):
return get_model_name(obj)
class FunctionArgumentChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = FunctionArgumentChoice
fields = (
'value',
'title',
)
class FunctionArgumentSerializer(serializers.ModelSerializer):
choices = FunctionArgumentChoiceSerializer(many=True)
class Meta:
model = FunctionArgument
fields = ('name', 'description', 'choices')
class FunctionDefinitionSerializer(serializers.ModelSerializer):
arguments = FunctionArgumentSerializer(many=True)
class Meta:
model = FunctionDefinition
exclude = ('id', 'polymorphic_ctype')
class FunctionLibrarySerializer(serializers.ModelSerializer):
functions = FunctionDefinitionSerializer(many=True)
class Meta:
model = FunctionLibrary
exclude = ('id',)
class ExecutionEnvironmentSerializer(serializers.ModelSerializer):
libraries = FunctionLibrarySerializer(many=True)
class Meta:
model = ExecutionEnvironment
exclude = ('id',)
class ProgramInterfaceListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='business-logic:rest:program-interface')
class Meta:
model = ProgramInterface
fields = '__all__'
class BlocklyXMLSerializer(serializers.CharField):
def to_representation(self, instance):
return BlocklyXmlBuilder().build(instance)
def to_internal_value(self, data):
return NodeTreeCreator().create(BlocklyXmlParser().parse(data)[0])
def run_validation(self, data=serializers.empty):
if data == '' or (self.trim_whitespace and six.text_type(data).strip() == ''):
if not self.allow_blank:
self.fail('blank')
return ''
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
try:
BlocklyXmlParser().parse(data)
except Exception as e:
raise serializers.ValidationError(
["Xml parse error - {}: {}".format(e.__class__.__name__, six.text_type(e))])
value = self.to_internal_value(data)
self.run_validators(value)
return value
class ProgramSerializer(serializers.ModelSerializer):
environment = ExecutionEnvironmentSerializer(read_only=True)
class Meta:
model = Program
fields = '__all__'
class ProgramListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='business-logic:rest:program')
class Meta:
model = Program
fields = '__all__'
class ProgramVersionListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='business-logic:rest:program-version')
class Meta:
model = ProgramVersion
read_only_fields = ('is_default',)
exclude = ('entry_point',)
class ProgramVersionCreateSerializer(serializers.ModelSerializer):
xml = BlocklyXMLSerializer(source='entry_point', required=True)
id = serializers.IntegerField(read_only=True)
class Meta:
model = ProgramVersion
fields = ('title', 'description', 'xml', 'program', 'id')
class ProgramVersionSerializer(serializers.ModelSerializer):
xml = BlocklyXMLSerializer(source='entry_point', required=True)
program = serializers.PrimaryKeyRelatedField(read_only=True)
environment = ExecutionEnvironmentSerializer(read_only=True)
class Meta:
model = ProgramVersion
exclude = ('entry_point',)
class ReferenceDescriptorListSerializer(serializers.ModelSerializer):
name = serializers.SerializerMethodField()
verbose_name = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
content_type = ContentTypeSerializer()
class Meta:
model = ReferenceDescriptor
exclude = ('title',)
def get_name(self, obj):
return get_model_name(obj.content_type)
def get_verbose_name(self, obj):
return obj.title or obj.content_type.model_class()._meta.verbose_name
def get_url(self, obj):
return reverse('business-logic:rest:reference-list', kwargs=dict(model=get_model_name(obj.content_type)))
class ReferenceSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
name = serializers.SerializerMethodField()
def get_fields(self):
declared_fields = copy.deepcopy(self._declared_fields)
return declared_fields
def get_name(self, obj):
reference_descriptor = self.context['view'].get_reference_descriptor()
return six.text_type(getattr(obj, reference_descriptor.name_field) if reference_descriptor.name_field else obj)
class ProgramArgumentFieldSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramArgumentField
def to_representation(self, instance):
representation = {}
representation['name'] = instance.name
argument = instance.program_argument
model = argument.content_type.model_class()
field_names = instance.name.split('.')
for i, field_name in enumerate(field_names):
field = model._meta.get_field(field_name)
is_last_field = i == len(field_names) - 1
is_django_model = field.__class__ in DJANGO_FIELDS_FOR_TYPES['model']
if is_django_model:
model = field.related_model
if is_last_field:
representation['data_type'] = TYPES_FOR_DJANGO_FIELDS[field.__class__]
representation['content_type'] = (ContentTypeSerializer().to_representation(
ContentType.objects.get_for_model(model)) if is_django_model else None)
representation['verbose_name'] = instance.get_title()
return representation
class ProgramArgumentSerializer(serializers.ModelSerializer):
fields = ProgramArgumentFieldSerializer(many=True)
verbose_name = serializers.SerializerMethodField()
content_type = ContentTypeSerializer()
class Meta:
model = ProgramArgument
exclude = ('id', 'program_interface', 'variable_definition')
def get_verbose_name(self, obj):
return get_model_verbose_name(obj.content_type)
class ProgramInterfaceSerializer(serializers.ModelSerializer):
arguments = ProgramArgumentSerializer(many=True)
environment = ExecutionEnvironmentSerializer()
class Meta:
model = ProgramInterface
exclude = ('id',)
class ExecutionListSerializer(serializers.ModelSerializer):
class Meta:
model = Execution
exclude = ('log',)
class ExecutionArgumentSerializer(serializers.ModelSerializer):
content_type = ContentTypeSerializer()
name = serializers.SerializerMethodField()
verbose_name = serializers.SerializerMethodField()
class Meta:
model = ExecutionArgument
exclude = ('id', 'program_argument', 'execution')
def get_name(self, obj):
return obj.program_argument.name
def get_verbose_name(self, obj):
return get_model_verbose_name(obj.content_type)
class ExecutionSerializer(serializers.ModelSerializer):
arguments = ExecutionArgumentSerializer(many=True)
class Meta:
model = Execution
exclude = ('log',)
class ExceptionLogSerializer(serializers.ModelSerializer):
class Meta:
model = ExceptionLog
exclude = ('log_entry', 'id')
class LogSerializer(serializers.ModelSerializer):
exception = ExceptionLogSerializer()
class Meta:
model = LogEntry
exclude = ('sib_order', 'parent', 'id')
def get_fields(self):
fields = super(LogSerializer, self).get_fields()
fields['children'] = LogSerializer(many=True)
return fields
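# Hedged usage sketch (not part of the original module): how a caller might drive
# ProgramVersionCreateSerializer; the blockly_xml payload is a placeholder and must
# be valid Blockly XML accepted by BlocklyXmlParser, which is not shown here.
def _example_create_program_version(program_id, blockly_xml):
    serializer = ProgramVersionCreateSerializer(data={
        'title': 'v1',
        'description': 'illustrative only',
        'xml': blockly_xml,
        'program': program_id,
    })
    serializer.is_valid(raise_exception=True)
    return serializer.save()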
|
mit
| -2,308,939,436,610,275,000
| 29.543333
| 119
| 0.68973
| false
| 4.469756
| false
| false
| false
|
joelarmstrong/analysis-purgatory
|
splitting-top-down/plot.py
|
1
|
2309
|
#!/usr/bin/env python
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def main():
sns.set_style('ticks')
df = pd.read_csv('results.csv')
df['fraction_good_splits'] = (df['perfect_splits']/df['fraction_perfect_splits'] - df['mismatching_leaf_sets'] - df['flipped_splits']) / (df['perfect_splits']/df['fraction_perfect_splits'])
grid = sns.FacetGrid(df, size=5, row='evaluation_method',
row_order=['none', 'relaxed-split-decomposition', 'split-decomposition'],
hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition'],
aspect=16.0/9, legend_out=True)
grid.map(sns.boxplot, 'loss_rate', 'fraction_good_splits', 'cluster_method', palette='colorblind', hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition']).set_axis_labels('Loss rate (as fraction of substitution rate)', 'Fraction of true splits correctly split or unresolved')
legend = plt.legend(loc='center left', bbox_to_anchor=(1, 1.5))
sns.plt.savefig('varying_loss_rate.pdf', bbox_extra_artists=(legend,), bbox_inches='tight')
grid = sns.FacetGrid(df, size=5, row='evaluation_method',
row_order=['none', 'relaxed-split-decomposition', 'split-decomposition'],
hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition'],
aspect=16.0/9, legend_out=True)
grid.map(sns.boxplot, 'duplication_rate', 'fraction_good_splits', 'cluster_method', palette='colorblind', hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition']).set_axis_labels('Duplication rate (as fraction of substitution rate)', 'Fraction of true splits correctly split or unresolved')
legend = plt.legend(loc='center left', bbox_to_anchor=(1, 1.5))
sns.plt.savefig('varying_duplication_rate.pdf', bbox_extra_artists=(legend,), bbox_inches='tight')
print df.groupby(['cluster_method', 'evaluation_method']).sum().to_csv()
if __name__ == '__main__':
main()
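# Hedged note (inferred from the column accesses above, not part of the original
# script): results.csv is expected to provide at least these columns: loss_rate,
# duplication_rate, cluster_method, evaluation_method, perfect_splits,
# fraction_perfect_splits, mismatching_leaf_sets and flipped_splits.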
|
mit
| 1,598,443,924,317,746,700
| 73.483871
| 372
| 0.658294
| false
| 3.400589
| false
| false
| false
|
vinodpanicker/scancode-toolkit
|
src/scancode/format.py
|
1
|
5795
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
from collections import OrderedDict
from operator import itemgetter
from os.path import dirname
from os.path import exists
from os.path import join
from os.path import abspath
from os import makedirs
from commoncode import fileutils
"""
Format scan outputs.
"""
def get_html_template(format): # @ReservedAssignment
"""
Given a format string corresponding to a template directory, load and return
the template.html file found in that directory.
"""
from jinja2 import Environment, FileSystemLoader
templates_dir = get_template_dir(format)
env = Environment(loader=FileSystemLoader(templates_dir))
template = env.get_template('template.html')
return template
def get_template_dir(format): # @ReservedAssignment
"""
Given a format string return the corresponding template directory.
"""
return join(dirname(__file__), 'templates', format)
def as_html_app(scanned_path, output_file):
"""
Return an HTML string built from the html-app template, referencing `scanned_path` and the assets directory derived from `output_file`.
"""
template = get_html_template('html-app')
_, assets_dir = get_html_app_files_dirs(output_file)
return template.render(assets_dir=assets_dir, scanned_path=scanned_path)
class HtmlAppAssetCopyWarning(Exception):
pass
class HtmlAppAssetCopyError(Exception):
pass
def is_stdout(output_file):
return output_file.name == '<stdout>'
def get_html_app_files_dirs(output_file):
"""
Return a tuple of (parent_dir, dir_name) where dir_name is named after the
`output_file` file object's base name (stripped of its extension) with a
`_files` suffix. Return empty strings if output is to stdout.
"""
if is_stdout(output_file):
return '', ''
file_name = output_file.name
parent_dir = dirname(file_name)
dir_name = fileutils.file_base_name(file_name) + '_files'
return parent_dir, dir_name
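# Illustrative sketch (added; not part of the original module): the file path below is an
# assumption, chosen only to show the naming convention implemented by get_html_app_files_dirs().
# with open('/tmp/scans/output.html', 'w') as out:
#     get_html_app_files_dirs(out)   # -> ('/tmp/scans', 'output_files')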
def create_html_app_assets(results, output_file):
"""
Given an html-app output_file, create the corresponding `_files` directory
and copy the assets to this directory. The target directory is deleted if it
exists.
Raise HtmlAppAssetCopyWarning if the output_file is <stdout> or
HtmlAppAssetCopyError if the copy was not possible.
"""
try:
if is_stdout(output_file):
raise HtmlAppAssetCopyWarning()
assets_dir = join(get_template_dir('html-app'), 'assets')
tgt_dirs = get_html_app_files_dirs(output_file)
target_dir = join(*tgt_dirs)
if exists(target_dir):
fileutils.delete(target_dir)
fileutils.copytree(assets_dir, target_dir)
# write json data
import json
root_path, assets_dir = get_html_app_files_dirs(output_file)
with open(join(root_path, assets_dir, 'data.json'), 'w') as f:
f.write('data=' + json.dumps(results))
except HtmlAppAssetCopyWarning, w:
raise w
except Exception, e:
raise HtmlAppAssetCopyError(e)
def as_html(detected_data):
"""
Return an HTML string built from a list of results and the html template.
"""
template = get_html_template('html')
converted = OrderedDict()
licenses = {}
# Create a dict keyed by location
for scan_result in detected_data:
location = scan_result['location']
results = []
if 'copyrights' in scan_result:
for entry in scan_result['copyrights']:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'copyright',
# NOTE: we display one statement per line.
'value': '\n'.join(entry['statements']),
})
if 'licenses' in scan_result:
for entry in scan_result['licenses']:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'license',
'value': entry['key'],
})
if entry['key'] not in licenses:
licenses[entry['key']] = entry
if results:
converted[location] = sorted(results, key=itemgetter('start'))
licenses = OrderedDict(sorted(licenses.items()))
return template.render(results=converted, licenses=licenses)
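# Hedged usage sketch (added; not part of the original module). The file names and the minimal
# results list are assumptions; the calls only combine the functions defined above.
# detected_data = [{'location': 'pkg/setup.py', 'copyrights': [], 'licenses': []}]
# with open('scan.html', 'w') as output_file:          # plain 'html' format
#     output_file.write(as_html(detected_data))
# with open('scan-app.html', 'w') as output_file:      # 'html-app' format
#     output_file.write(as_html_app('pkg/', output_file))
#     create_html_app_assets(detected_data, output_file)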
|
apache-2.0
| 590,873,037,973,926,900
| 33.088235
| 82
| 0.663158
| false
| 4.083862
| false
| false
| false
|
daeilkim/refinery
|
refinery/bnpy/bnpy-dev/bnpy/init/FromTruth.py
|
1
|
2192
|
'''
FromTruth.py
Initialize params of a bnpy model using "ground truth" information,
such as human annotations.
These are provided within a Data object, as a "TrueLabels" field.
'''
import numpy as np
import FromScratchMult
def init_global_params(hmodel, Data, initname=None, seed=0, nRepeatTrue=2, **kwargs):
''' Initialize (in-place) the global params of the given hmodel
using the true labels associated with the Data
Args
-------
hmodel : bnpy model object to initialize
Data : bnpy Data object whose dimensions must match resulting hmodel
initname : string name for the routine to use
'truelabels', 'repeattruelabels', or 'trueparams'
'''
PRNG = np.random.RandomState(seed)
if initname.count('truelabels') > 0:
if hasattr(Data, 'TrueLabels'):
resp = calc_resp_from_true_labels(Data)
elif hasattr(Data, 'TrueParams'):
if 'resp' in Data.TrueParams:
resp = Data.TrueParams['resp']
if 'word_variational' in Data.TrueParams:
resp = Data.TrueParams['word_variational']
if initname == 'truelabels':
pass # have everything we need
elif initname == 'repeattruelabels':
Ktrue = resp.shape[1]
rowIDs = PRNG.permutation(Data.nObs)
L = len(rowIDs)/nRepeatTrue
bigResp = np.zeros((Data.nObs, Ktrue*nRepeatTrue))
curLoc = 0
for r in range(nRepeatTrue):
targetIDs = rowIDs[curLoc:curLoc+L]
bigResp[targetIDs, r*Ktrue:(r+1)*Ktrue] = resp[targetIDs,:]
curLoc += L
resp = bigResp
elif initname == 'trueparams':
hmodel.set_global_params(**Data.TrueParams)
return
else:
raise NotImplementedError('Unknown initname: %s' % (initname))
if hmodel.obsModel.__class__.__name__.count('Gauss') > 0:
LP = dict(resp=resp)
else:
LP = FromScratchMult.getLPfromResp(resp, Data)
SS = hmodel.get_global_suff_stats(Data, LP)
hmodel.update_global_params(SS)
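# Hedged usage sketch (added; not part of the original module): hmodel and Data are assumed to be
# an already-constructed bnpy model and dataset carrying a TrueLabels field, as described above.
# init_global_params(hmodel, Data, initname='repeattruelabels', seed=0, nRepeatTrue=2)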
def calc_resp_from_true_labels(Data):
TrueLabels = Data.TrueLabels
uniqueLabels = np.unique(TrueLabels)
Ktrue = len(uniqueLabels)
resp = np.zeros((Data.nObs, Ktrue))
for k in range(Ktrue):
mask = TrueLabels == uniqueLabels[k]
resp[mask,k] = 1.0
return resp
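# Illustrative example (added; not part of the original module): a minimal stand-in object with
# the two attributes calc_resp_from_true_labels reads; the values are assumptions.
# class _ToyData(object):
#     nObs = 4
#     TrueLabels = np.asarray([0, 0, 1, 1])
# calc_resp_from_true_labels(_ToyData())
# # -> 4 x 2 one-hot matrix: each row has a 1 in the column of its true label.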
|
mit
| -7,890,607,368,942,962,000
| 30.328571
| 85
| 0.671989
| false
| 3.41433
| false
| false
| false
|
pszemus/grpc
|
examples/python/debug/get_stats.py
|
1
|
1475
|
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Poll statistics from the server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import argparse
import grpc
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
def run(addr):
with grpc.insecure_channel(addr) as channel:
channelz_stub = channelz_pb2_grpc.ChannelzStub(channel)
response = channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
print('Info for all servers: %s' % response)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--addr',
type=str,
default='[::]:50051',
help='the address to request')
args = parser.parse_args()
run(addr=args.addr)
if __name__ == '__main__':
logging.basicConfig()
main()
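# Hedged extension sketch (added; not part of the original example): the Channelz service also
# exposes client-side channels. Assuming the same stub as in run() above, a similar query would be:
# response = channelz_stub.GetTopChannels(
#     channelz_pb2.GetTopChannelsRequest(start_channel_id=0))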
|
apache-2.0
| -7,507,955,330,443,291,000
| 28.5
| 74
| 0.697627
| false
| 3.841146
| false
| false
| false
|
amjames/psi4
|
psi4/driver/driver.py
|
1
|
119607
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with a *procedures* dictionary specifying available quantum
chemical methods and functions driving the main quantum chemical
functionality, namely single-point energies, geometry optimizations,
properties, and vibrational frequency calculations.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import sys
import json
import shutil
import numpy as np
from psi4.driver import driver_util
from psi4.driver import driver_cbs
from psi4.driver import driver_nbody
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver.procrouting import *
from psi4.driver.p4util.exceptions import *
# never import wrappers or aliases into this file
def _find_derivative_type(ptype, method_name, user_dertype):
r"""
Figures out the derivative type (0, 1, 2) for a given method_name. Uses the
user-specified dertype when one is given; otherwise selects the highest
derivative type available for the method.
"""
if ptype not in ['gradient', 'hessian']:
raise ValidationError("_find_derivative_type: ptype must either be gradient or hessian.")
dertype = "(auto)"
# If user type is None, try to find the highest derivative
if user_dertype is None:
if (ptype == 'hessian') and (method_name in procedures['hessian']):
dertype = 2
# Will need special logic if we ever have managed Hessians
elif method_name in procedures['gradient']:
dertype = 1
if procedures['gradient'][method_name].__name__.startswith('select_'):
try:
procedures['gradient'][method_name](method_name, probe=True)
except ManagedMethodError:
dertype = 0
elif method_name in procedures['energy']:
dertype = 0
else:
# Quick sanity check. Only *should* be able to be None or int, but hey, kids today...
if not isinstance(user_dertype, int):
raise ValidationError("_find_derivative_type: user_dertype should only be None or int!")
dertype = user_dertype
if (core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and (dertype != 0):
raise ValidationError('INTEGRAL_PACKAGE ERD does not play nicely with derivatives, so stopping.')
if (core.get_global_option('PCM')) and (dertype != 0):
core.print_out('\nPCM analytic gradients are not implemented yet, re-routing to finite differences.\n')
dertype = 0
# Summary validation
if (dertype == 2) and (method_name in procedures['hessian']):
pass
elif (dertype == 1) and (method_name in procedures['gradient']):
pass
elif (dertype == 0) and (method_name in procedures['energy']):
pass
else:
alternatives = ''
alt_method_name = p4util.text.find_approximate_string_matches(method_name, procedures['energy'].keys(), 2)
if len(alt_method_name) > 0:
alternatives = """ Did you mean? %s""" % (' '.join(alt_method_name))
raise ValidationError("""Derivative method 'name' %s and derivative level 'dertype' %s are not available.%s"""
% (method_name, str(dertype), alternatives))
return dertype
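# Hedged illustration (added comment, not in the original source): with user_dertype=None this
# helper returns the highest analytic derivative level registered in procedures[], e.g. 1 for a
# method with an analytic gradient but no analytic Hessian, and 0 when only an energy
# implementation exists.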
def _energy_is_invariant(gradient, stationary_criterion=1.e-2):
"""Polls options and probes `gradient` to return whether current method
and system are expected to be invariant to translations and rotations of
the coordinate system.
"""
stationary_point = gradient.rms() < stationary_criterion # 1.e-2 pulled out of a hat
efp = core.get_active_efp()
efp_present = efp.nfragments() > 0
translations_projection_sound = (not core.get_option('SCF', 'EXTERN') and
not core.get_option('SCF', 'PERTURB_H') and
not efp_present)
rotations_projection_sound = (translations_projection_sound and
stationary_point)
return translations_projection_sound, rotations_projection_sound
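# Hedged illustration (added comment, not in the original source): callers unpack the pair, e.g.
#   trans_ok, rot_ok = _energy_is_invariant(wfn.gradient())
# and use the flags to decide whether translational/rotational degrees of freedom may be projected
# out; `wfn` here is an assumed Wavefunction object.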
def energy(name, **kwargs):
r"""Function to compute the single-point electronic energy.
:returns: *float* |w--w| Total electronic energy in Hartrees. SAPT & EFP return interaction energy.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CURRENT ENERGY <CURRENTENERGY>`
* :psivar:`CURRENT REFERENCE ENERGY <CURRENTREFERENCEENERGY>`
* :psivar:`CURRENT CORRELATION ENERGY <CURRENTCORRELATIONENERGY>`
:type name: string
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
:type restart_file: string
:param restart_file: ``['file.1, file.32]`` || ``./file`` || etc.
Binary data files to be renamed for calculation restart.
.. _`table:energy_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| efp | effective fragment potential (EFP) :ref:`[manual] <sec:libefp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scf | Hartree--Fock (HF) or density functional theory (DFT) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf | HF self consistent field (SCF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf3c | HF with dispersion, BSSE, and basis set corrections :ref:`[manual] <sec:gcp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| pbeh3c | PBEh with dispersion, BSSE, and basis set corrections :ref:`[manual] <sec:gcp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dcft | density cumulant functional theory :ref:`[manual] <sec:dcft>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2 | 2nd-order |MollerPlesset| perturbation theory (MP2) :ref:`[manual] <sec:dfmp2>` :ref:`[details] <tlmp2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp3 | 3rd-order |MollerPlesset| perturbation theory (MP3) :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp3>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp3 | MP3 with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2.5 | average of MP2 and MP3 :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp25>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp4(sdq) | 4th-order MP perturbation theory (MP4) less triples :ref:`[manual] <sec:fnompn>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp4(sdq) | MP4 (less triples) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp4 | full MP4 :ref:`[manual] <sec:fnompn>` :ref:`[details] <tlmp4>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp4 | full MP4 with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp\ *n* | *n*\ th-order |MollerPlesset| (MP) perturbation theory :ref:`[manual] <sec:arbpt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| zapt\ *n* | *n*\ th-order z-averaged perturbation theory (ZAPT) :ref:`[manual] <sec:arbpt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2 | orbital-optimized second-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp2 | spin-component scaled OMP2 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs(n)-omp2 | a special version of SCS-OMP2 for nucleobase interactions :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp2-vdw | a special version of SCS-OMP2 (from ethene dimers) :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-omp2 | spin-opposite scaled OMP2 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-pi-omp2 | A special version of SOS-OMP2 for pi systems :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp3 | orbital-optimized third-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp3 | spin-component scaled OMP3 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs(n)-omp3 | a special version of SCS-OMP3 for nucleobase interactions :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp3-vdw | a special version of SCS-OMP3 (from ethene dimers) :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-omp3 | spin-opposite scaled OMP3 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-pi-omp3 | A special version of SOS-OMP3 for pi systems :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccsd, cepa(0) | coupled electron pair approximation variant 0 :ref:`[manual] <sec:fnocepa>` :ref:`[details] <tllccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-lccsd, fno-cepa(0) | CEPA(0) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cepa(1) | coupled electron pair approximation variant 1 :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cepa(1) | CEPA(1) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cepa(3) | coupled electron pair approximation variant 3 :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cepa(3) | CEPA(3) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| acpf | averaged coupled-pair functional :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-acpf | ACPF with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| aqcc | averaged quadratic coupled cluster :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-aqcc | AQCC with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| qcisd | quadratic CI singles doubles (QCISD) :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-qcisd | QCISD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccd | Linear CCD :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tllccd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-lccd | LCCD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| olccd | orbital optimized LCCD :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cc2 | approximate coupled cluster singles and doubles (CC2) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccd | coupled cluster doubles (CCD) :ref:`[manual] <sec:occ_nonoo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd | coupled cluster singles and doubles (CCSD) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| bccd | Brueckner coupled cluster doubles (BCCD) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-ccsd | CCSD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| qcisd(t) | QCISD with perturbative triples :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-qcisd(t) | QCISD(T) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(t) | CCSD with perturbative triples (CCSD(T)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(at) | CCSD with asymmetric perturbative triples (CCSD(AT)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdat>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| bccd(t) | BCCD with perturbative triples :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-ccsd(t) | CCSD(T) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cc3 | approximate CC singles, doubles, and triples (CC3) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccenergy | **expert** full control over ccenergy module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dfocc | **expert** full control over dfocc module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisd | configuration interaction (CI) singles and doubles (CISD) :ref:`[manual] <sec:ci>` :ref:`[details] <tlcisd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cisd | CISD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisdt | CI singles, doubles, and triples (CISDT) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisdtq | CI singles, doubles, triples, and quadruples (CISDTQ) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ci\ *n* | *n*\ th-order CI :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fci | full configuration interaction (FCI) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| detci | **expert** full control over detci module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| casscf | complete active space self consistent field (CASSCF) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| rasscf | restricted active space self consistent field (RASSCF) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mcscf | multiconfigurational self consistent field (SCF) :ref:`[manual] <sec:psimrcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| psimrcc | Mukherjee multireference coupled cluster (Mk-MRCC) :ref:`[manual] <sec:psimrcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-scf | density matrix renormalization group SCF :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-caspt2 | density matrix renormalization group CASPT2 :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-ci | density matrix renormalization group CI :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt0 | 0th-order symmetry adapted perturbation theory (SAPT) :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ssapt0 | 0th-order SAPT with special exchange scaling :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fisapt0 | 0th-order functional and/or intramolecular SAPT :ref:`[manual] <sec:fisapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2 | 2nd-order SAPT, traditional definition :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+ | SAPT including all 2nd-order terms :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3) | SAPT including perturbative triples :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3 | SAPT including all 3rd-order terms :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd) | SAPT2+ with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd) | SAPT2+(3) with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd) | SAPT2+3 with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+dmp2 | SAPT including all 2nd-order terms and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)dmp2 | SAPT including perturbative triples and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3dmp2 | SAPT including all 3rd-order terms and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd)dmp2 | SAPT2+ with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd)dmp2 | SAPT2+(3) with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd)dmp2 | SAPT2+3 with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt0-ct | 0th-order SAPT plus charge transfer (CT) calculation :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2-ct | SAPT2 plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+-ct | SAPT2+ plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)-ct | SAPT2+(3) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3-ct | SAPT2+3 plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd)-ct | SAPT2+(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd)-ct | SAPT2+(3)(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd)-ct | SAPT2+3(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| adc | 2nd-order algebraic diagrammatic construction (ADC) :ref:`[manual] <sec:adc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-cc2 | EOM-CC2 :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-ccsd | equation of motion (EOM) CCSD :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-cc3 | EOM-CC3 :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
.. comment missing and why
.. comment a certain isapt --- marginally released
.. comment mrcc --- this is handled in its own table
.. comment psimrcc_scf --- convenience fn
.. include:: ../autodoc_dft_energy.rst
.. include:: ../mrcc_table_energy.rst
.. include:: ../cfour_table_energy.rst
:examples:
>>> # [1] Coupled-cluster singles and doubles calculation with psi code
>>> energy('ccsd')
>>> # [2] Charge-transfer SAPT calculation with scf projection from small into
>>> # requested basis, with specified projection fitting basis
>>> set basis_guess true
>>> set df_basis_guess jun-cc-pVDZ-JKFIT
>>> energy('sapt0-ct')
>>> # [3] Arbitrary-order MPn calculation
>>> energy('mp7')
>>> # [4] Converge scf as singlet, then run detci as triplet upon singlet reference
>>> # Note that the integral transformation is not done automatically when detci is run in a separate step.
>>> molecule H2 {\n0 1\nH\nH 1 0.74\n}
>>> set basis cc-pVDZ
>>> set reference rohf
>>> scf_e, scf_wfn = energy('scf', return_wfn=True)
>>> H2.set_multiplicity(3)
>>> core.MintsHelper(scf_wfn.basisset()).integrals()
>>> energy('detci', ref_wfn=scf_wfn)
>>> # [5] Run two CI calculations, keeping the integrals generated in the first one.
>>> molecule ne {\nNe\n}
>>> set basis cc-pVDZ
>>> cisd_e, cisd_wfn = energy('cisd', return_wfn=True)
>>> energy('fci', ref_wfn=cisd_wfn)
>>> # [6] Can automatically perform complete basis set extrapolations
>>> energy("CCSD/cc-pV[DT]Z")
>>> # [7] Can automatically perform delta corrections that include extrapolations
>>> # even with a user-defined extrapolation formula. See sample inputs named
>>> # cbs-xtpl* for more examples of this input style
>>> energy("MP2/aug-cc-pv([d,t]+d)z + d:ccsd(t)/cc-pvdz", corl_scheme=myxtplfn_2)
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce if name is function
if hasattr(name, '__call__'):
return name(energy, kwargs.pop('label', 'custom function'), ptype='energy', **kwargs)
# Allow specification of methods to arbitrary order
lowername = name.lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
# Bounce to CP if bsse kwarg
if kwargs.get('bsse_type', None) is not None:
return driver_nbody.nbody_gufunc(energy, name, ptype='energy', **kwargs)
# Bounce to CBS if "method/basis" name
if "/" in lowername:
return driver_cbs._cbs_gufunc(energy, name, ptype='energy', **kwargs)
# Commit to procedures['energy'] call hereafter
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
#for precallback in hooks['energy']['pre']:
# precallback(lowername, **kwargs)
optstash = driver_util._set_convergence_criterion('energy', lowername, 6, 8, 6, 8, 6)
# Before invoking the procedure, we rename any file that should be read.
# This is a workaround to do restarts with the current PSI4 capabilities
# before actual, clean restarts are implemented
# Restartfile is always converted to a single-element list if
# it contains a single string
# DGAS Note: This is hacked together at this point and should be revamped.
if 'restart_file' in kwargs:
restartfile = kwargs['restart_file'] # Option still available for procedure-specific action
if not isinstance(restartfile, (list, tuple)):
restartfile = (restartfile, )
# Rename the files to be read to be consistent with psi4's file system
for item in restartfile:
name_split = re.split(r'\.', item)
if "npz" in item:
fname = os.path.split(os.path.abspath(core.get_writer_file_prefix(molecule.name())))[1]
psi_scratch = core.IOManager.shared_object().get_default_path()
file_num = item.split('.')[-2]
targetfile = os.path.join(psi_scratch, fname + "." + file_num + ".npz")
else:
filenum = name_split[-1]
try:
filenum = int(filenum)
except ValueError:
filenum = 32 # Default file number is the checkpoint one
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
filepath = psioh.get_file_path(filenum)
namespace = psio.get_default_namespace()
pid = str(os.getpid())
prefix = 'psi'
targetfile = filepath + prefix + '.' + pid + '.' + namespace + '.' + str(filenum)
shutil.copy(item, targetfile)
wfn = procedures['energy'][lowername](lowername, molecule=molecule, **kwargs)
for postcallback in hooks['energy']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
optstash.restore()
if return_wfn: # TODO current energy safer than wfn.energy() for now, but should be revisited
# TODO place this with the associated call, very awkward to call this in other areas at the moment
if lowername in ['efp', 'mrcc', 'dmrg', 'psimrcc']:
core.print_out("\n\nWarning! %s does not have an associated derived wavefunction." % name)
core.print_out("The returned wavefunction is the incoming reference wavefunction.\n\n")
elif 'sapt' in lowername:
core.print_out("\n\nWarning! %s does not have an associated derived wavefunction." % name)
core.print_out("The returned wavefunction is the dimer SCF wavefunction.\n\n")
return (core.get_variable('CURRENT ENERGY'), wfn)
else:
return core.get_variable('CURRENT ENERGY')
def gradient(name, **kwargs):
r"""Function complementary to :py:func:~driver.optimize(). Carries out one gradient pass,
deciding analytic or finite difference.
:returns: :py:class:`~psi4.core.Matrix` |w--w| Total electronic gradient in Hartrees/Bohr.
:returns: (:py:class:`~psi4.core.Matrix`, :py:class:`~psi4.core.Wavefunction`) |w--w| gradient and wavefunction when **return_wfn** specified.
:examples:
>>> # [1] Single-point dft gradient getting the gradient
>>> # in file, core.Matrix, and np.array forms
>>> set gradient_write on
>>> G, wfn = gradient('b3lyp-d', return_wfn=True)
>>> wfn.gradient().print_out()
>>> np.array(G)
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce to CP if bsse kwarg (someday)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Gradient: Cannot specify bsse_type for gradient yet.")
# Figure out what kind of gradient this is
if hasattr(name, '__call__'):
if name.__name__ in ['cbs', 'complete_basis_set']:
gradient_type = 'cbs_wrapper'
else:
# Bounce to name if name is non-CBS function
gradient_type = 'custom_function'
elif '/' in name:
gradient_type = 'cbs_gufunc'
else:
gradient_type = 'conventional'
# Figure out lowername, dertype, and func
# If we have analytical gradients we want to pass to our wrappers, otherwise we want to run
# finite-difference energy or cbs energies
# TODO MP5/cc-pv[DT]Z behavior unknown due to "levels"
user_dertype = kwargs.pop('dertype', None)
if gradient_type == 'custom_function':
if user_dertype is None:
dertype = 0
core.print_out("\nGradient: Custom function passed in without a defined dertype, assuming fd-energy based gradient.\n")
else:
core.print_out("\nGradient: Custom function passed in with a dertype of %d\n" % user_dertype)
dertype = user_dertype
if dertype == 1:
return name(gradient, kwargs.pop('label', 'custom function'), ptype='gradient', **kwargs)
else:
optstash = driver_util._set_convergence_criterion('energy', 'scf', 8, 10, 8, 10, 8)
lowername = name
elif gradient_type == 'cbs_wrapper':
cbs_methods = driver_cbs._cbs_wrapper_methods(**kwargs)
dertype = min([_find_derivative_type('gradient', method, user_dertype) for method in cbs_methods])
if dertype == 1:
# Bounce to CBS (directly) in pure-gradient mode if name is CBS and all parts have analytic grad. avail.
return name(gradient, kwargs.pop('label', 'custom function'), ptype='gradient', **kwargs)
else:
optstash = driver_util._set_convergence_criterion('energy', cbs_methods[0], 8, 10, 8, 10, 8)
lowername = name
# Pass through to G by E
elif gradient_type == 'cbs_gufunc':
cbs_methods = driver_cbs._parse_cbs_gufunc_string(name.lower())[0]
dertype = min([_find_derivative_type('gradient', method, user_dertype) for method in cbs_methods])
lowername = name.lower()
if dertype == 1:
# Bounce to CBS in pure-gradient mode if "method/basis" name and all parts have analytic grad. avail.
return driver_cbs._cbs_gufunc(gradient, name, ptype='gradient', **kwargs)
else:
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash = driver_util._set_convergence_criterion('energy', cbs_methods[0], 8, 10, 8, 10, 8)
else:
# Allow specification of methods to arbitrary order
lowername = name.lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
# Prevent methods that do not have associated gradients
if lowername in energy_only_methods:
raise ValidationError("gradient('%s') does not have an associated gradient" % name)
dertype = _find_derivative_type('gradient', lowername, user_dertype)
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash = driver_util._set_convergence_criterion('energy', lowername, 8, 10, 8, 10, 8)
# Commit to procedures[] call hereafter
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
# no analytic derivatives for scf_type cd
if core.get_global_option('SCF_TYPE') == 'CD':
if (dertype == 1):
raise ValidationError("""No analytic derivatives for SCF_TYPE CD.""")
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# S/R: Mode of operation- whether finite difference opt run in one job or files farmed out
opt_mode = kwargs.get('mode', 'continuous').lower()
if opt_mode == 'continuous':
pass
elif opt_mode == 'sow':
if dertype == 1:
raise ValidationError("""Optimize execution mode 'sow' not valid for analytic gradient calculation.""")
elif opt_mode == 'reap':
opt_linkage = kwargs.get('linkage', None)
if opt_linkage is None:
raise ValidationError("""Optimize execution mode 'reap' requires a linkage option.""")
else:
raise ValidationError("""Optimize execution mode '%s' not valid.""" % (opt_mode))
# Does dertype indicate an analytic procedure both exists and is wanted?
if dertype == 1:
core.print_out("""gradient() will perform analytic gradient computation.\n""")
# Perform the gradient calculation
wfn = procedures['gradient'][lowername](lowername, molecule=molecule, **kwargs)
optstash.restore()
if return_wfn:
return (wfn.gradient(), wfn)
else:
return wfn.gradient()
else:
core.print_out("""gradient() will perform gradient computation by finite difference of analytic energies.\n""")
opt_iter = kwargs.get('opt_iter', 1)
if opt_iter is True:
opt_iter = 1
if opt_iter == 1:
print('Performing finite difference calculations')
# The geometry will be shifted, so work on a copy of the active molecule
moleculeclone = molecule.clone()
# Obtain list of displacements
# print("about to generate displacements")
displacements = core.fd_geoms_1_0(moleculeclone)
# print(displacements)
ndisp = len(displacements)
# print("generated displacments")
# This version is pretty dependent on the reference geometry being last (as it is now)
print(""" %d displacements needed ...""" % (ndisp), end='')
energies = []
# S/R: Write instructions for sow/reap procedure to output file and reap input file
if opt_mode == 'sow':
instructionsO = """\n The optimization sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructionsO += """ to this output file (which contains no quantum chemical calculations), this job\n"""
instructionsO += """ has produced a number of input files (OPT-%s-*.in) for individual components\n""" % (str(opt_iter))
instructionsO += """ and a single input file (OPT-master.in) with an optimize(mode='reap') command.\n"""
instructionsO += """ These files may look very peculiar since they contain processed and pickled python\n"""
instructionsO += """ rather than normal input. Follow the instructions in OPT-master.in to continue.\n\n"""
instructionsO += """ Alternatively, a single-job execution of the gradient may be accessed through\n"""
instructionsO += """ the optimization wrapper option mode='continuous'.\n\n"""
core.print_out(instructionsO)
instructionsM = """\n# Follow the instructions below to carry out this optimization cycle.\n#\n"""
instructionsM += """# (1) Run all of the OPT-%s-*.in input files on any variety of computer architecture.\n""" % (str(opt_iter))
instructionsM += """# The output file names must be as given below.\n#\n"""
for rgt in range(ndisp):
pre = 'OPT-' + str(opt_iter) + '-' + str(rgt + 1)
instructionsM += """# psi4 -i %-27s -o %-27s\n""" % (pre + '.in', pre + '.out')
instructionsM += """#\n# (2) Gather all the resulting output files in a directory. Place input file\n"""
instructionsM += """# OPT-master.in into that directory and run it. The job will be minimal in\n"""
instructionsM += """# length and give summary results for the gradient step in its output file.\n#\n"""
if opt_iter == 1:
instructionsM += """# psi4 -i %-27s -o %-27s\n#\n""" % ('OPT-master.in', 'OPT-master.out')
else:
instructionsM += """# psi4 -a -i %-27s -o %-27s\n#\n""" % ('OPT-master.in', 'OPT-master.out')
instructionsM += """# After each optimization iteration, the OPT-master.in file is overwritten so return here\n"""
instructionsM += """# for new instructions. With the use of the psi4 -a flag, OPT-master.out is not\n"""
instructionsM += """# overwritten and so maintains a history of the job. To use the (binary) optimizer\n"""
instructionsM += """# data file to accelerate convergence, the OPT-master jobs must run on the same computer.\n\n"""
with open('OPT-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the gradient() wrapper.\n\n'.encode('utf-8'))
fmaster.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
fmaster.write(p4util.format_options_for_input().encode('utf-8'))
p4util.format_kwargs_for_input(fmaster, lmode=2, return_wfn=True, dertype=dertype, **kwargs)
fmaster.write(("""retE, retwfn = optimize('%s', **kwargs)\n\n""" % (lowername)).encode('utf-8'))
fmaster.write(instructionsM.encode('utf-8'))
for n, displacement in enumerate(displacements):
rfile = 'OPT-%s-%s' % (opt_iter, n + 1)
# Build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Gradient %d Computation: Displacement %d ')\n""" % (opt_iter, n + 1)
banners += """core.print_out('\\n')\n\n"""
if opt_mode == 'continuous':
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n + 1, ndisp))
print(""" %d""" % (n + 1), end=('\n' if (n + 1 == ndisp) else ''))
sys.stdout.flush()
# Load in displacement into the active molecule
moleculeclone.set_geometry(displacement)
# Perform the energy calculation
E, wfn = energy(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
energies.append(core.get_variable('CURRENT ENERGY'))
# S/R: Write each displaced geometry to an input file
elif opt_mode == 'sow':
moleculeclone.set_geometry(displacement)
# S/R: Prepare molecule, options, and kwargs
with open('%s.in' % (rfile), 'wb') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the gradient() wrapper.\n\n'.encode('utf-8'))
freagent.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
freagent.write(p4util.format_options_for_input().encode('utf-8'))
p4util.format_kwargs_for_input(freagent, **kwargs)
# S/R: Prepare function call and energy save
freagent.write(("""electronic_energy = energy('%s', **kwargs)\n\n""" % (lowername)).encode('utf-8'))
freagent.write(("""core.print_out('\\nGRADIENT RESULT: computation %d for item %d """ % (os.getpid(), n + 1)).encode('utf-8'))
freagent.write("""yields electronic energy %20.12f\\n' % (electronic_energy))\n\n""".encode('utf-8'))
# S/R: Read energy from each displaced geometry output file and save in energies array
elif opt_mode == 'reap':
exec(banners)
core.set_variable('NUCLEAR REPULSION ENERGY', moleculeclone.nuclear_repulsion_energy())
energies.append(p4util.extract_sowreap_from_output(rfile, 'GRADIENT', n, opt_linkage, True))
# S/R: Quit sow after writing files. Initialize skeleton wfn to receive grad for reap
if opt_mode == 'sow':
optstash.restore()
if return_wfn:
return (None, None) # any point to building a dummy wfn here?
else:
return None
elif opt_mode == 'reap':
core.set_variable('CURRENT ENERGY', energies[-1])
wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
# Compute the gradient; last item in 'energies' is undisplaced
core.set_local_option('FINDIF', 'GRADIENT_WRITE', True)
G = core.fd_1_0(molecule, energies)
G.print_out()
wfn.set_gradient(G)
optstash.restore()
if return_wfn:
return (wfn.gradient(), wfn)
else:
return wfn.gradient()
def properties(*args, **kwargs):
r"""Function to compute various properties.
:aliases: prop()
:returns: none.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- This function at present has limited functionality.
Consult the keywords sections of other modules for further property capabilities.
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| Name | Calls Method | Reference | Supported Properties |
+====================+===============================================+================+===============================================================+
| scf | Self-consistent field method(s) | RHF/ROHF/UHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| hf | HF Self-consistent field method(s) | RHF/ROHF/UHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| mp2 | MP2 with density fitting only (mp2_type df) | RHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| cc2 | 2nd-order approximate CCSD | RHF | dipole, quadrupole, polarizability, rotation, roa_tensor |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| ccsd | Coupled cluster singles and doubles (CCSD) | RHF | dipole, quadrupole, polarizability, rotation, roa_tensor |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| eom-cc2 | 2nd-order approximate EOM-CCSD | RHF | oscillator_strength, rotational_strength |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| eom-ccsd | Equation-of-motion CCSD (EOM-CCSD) | RHF | oscillator_strength, rotational_strength |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| cisd, cisdt, | Configuration interaction | RHF/ROHF | Listed :ref:`here <sec:oeprop>`, transition_dipole, |
| cisdt, cisdtq, | | | transition_quadrupole |
| ci5, ..., fci | | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| casscf, rasscf | Multi-configurational SCF | RHF/ROHF | Listed :ref:`here <sec:oeprop>`, transition_dipole, |
| | | | transition_quadrupole |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
:type name: string
:param name: ``'ccsd'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type properties: array of strings
:param properties: |dl| ``[]`` |dr| || ``['rotation', 'polarizability', 'oscillator_strength', 'roa']`` || etc.
Indicates which properties should be computed. Defaults to dipole and quadrupole.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:examples:
>>> # [1] Optical rotation calculation
>>> properties('cc2', properties=['rotation'])
"""
kwargs = p4util.kwargs_lower(kwargs)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
kwargs['molecule'] = molecule
# Allow specification of methods to arbitrary order
lowername = args[0].lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
if "/" in lowername:
return driver_cbs._cbs_gufunc(properties, lowername, ptype='properties', **kwargs)
return_wfn = kwargs.pop('return_wfn', False)
props = kwargs.get('properties', ['dipole', 'quadrupole'])
if len(args) > 1:
props += args[1:]
kwargs['properties'] = p4util.drop_duplicates(props)
optstash = driver_util._set_convergence_criterion('properties', lowername, 6, 10, 6, 10, 8)
wfn = procedures['properties'][lowername](lowername, **kwargs)
optstash.restore()
if return_wfn:
return (core.get_variable('CURRENT ENERGY'), wfn)
else:
return core.get_variable('CURRENT ENERGY')
def optimize(name, **kwargs):
r"""Function to perform a geometry optimization.
:aliases: opt()
:returns: *float* |w--w| Total electronic energy of optimized structure in Hartrees.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:raises: psi4.OptimizationConvergenceError if |optking__geom_maxiter| exceeded without reaching geometry convergence.
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CURRENT ENERGY <CURRENTENERGY>`
:type name: string
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system. May be any valid argument to
:py:func:`~driver.energy`.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
:type return_history: :ref:`boolean <op_py_boolean>`
:param return_history: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return dictionary of lists of geometries,
energies, and gradients at each step in the optimization.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``gradient`` |dr| || ``energy`` || ``cbs``
Indicates the type of calculation to be performed on the molecule.
The default dertype accesses ``'gradient'`` or ``'energy'``, while
``'cbs'`` performs a multistage finite difference calculation.
If a nested series of python functions is intended (see :ref:`sec:intercalls`),
use keyword ``opt_func`` instead of ``func``.
:type mode: string
:param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``
For a finite difference of energies optimization, indicates whether
the calculations required to complete the
optimization are to be run in one file (``'continuous'``) or are to be
farmed out in an embarrassingly parallel fashion
(``'sow'``/``'reap'``). For the latter, run an initial job with
``'sow'`` and follow instructions in its output file. For maximum
flexibility, ``return_wfn`` is always on in ``'reap'`` mode.
:type dertype: :ref:`dertype <op_py_dertype>`
:param dertype: ``'gradient'`` || ``'energy'``
Indicates whether analytic (if available) or finite difference
optimization is to be performed.
:type hessian_with: string
:param hessian_with: ``'scf'`` || ``'mp2'`` || etc.
Indicates the computational method with which to perform a hessian
analysis to guide the geometry optimization.
.. warning:: Optimizations where the molecule is specified in Z-matrix format
with dummy atoms will result in the geometry being converted to a Cartesian representation.
.. note:: Analytic gradients are available for all methods in the table
below. Optimizations with other methods in the energy table proceed
by finite differences.
.. _`table:grad_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| efp | efp-only optimizations |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scf | Hartree--Fock (HF) or density functional theory (DFT) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf | HF self consistent field (SCF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dcft | density cumulant functional theory :ref:`[manual] <sec:dcft>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2 | 2nd-order |MollerPlesset| perturbation theory (MP2) :ref:`[manual] <sec:dfmp2>` :ref:`[details] <tlmp2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp3 | 3rd-order |MollerPlesset| perturbation theory (MP3) :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp3>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2.5 | average of MP2 and MP3 :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp25>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2 | orbital-optimized second-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp3 | orbital-optimized third-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccd | Linear CCD :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tllccd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| olccd | orbital optimized LCCD :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccd | coupled cluster doubles (CCD) :ref:`[manual] <sec:occ_nonoo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd | coupled cluster singles and doubles (CCSD) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(t) | CCSD with perturbative triples (CCSD(T)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-ccsd | equation of motion (EOM) CCSD :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
.. _`table:grad_scf`:
.. include:: ../autodoc_dft_opt.rst
.. include:: ../cfour_table_grad.rst
:examples:
>>> # [1] Analytic hf optimization
>>> optimize('hf')
>>> # [2] Finite difference mp5 optimization with gradient
>>> # printed to output file
>>> e, wfn = opt('mp5', return_wfn='yes')
>>> wfn.gradient().print_out()
>>> # [3] Forced finite difference hf optimization run in
>>> # embarrassingly parallel fashion
>>> optimize('hf', dertype='energy', mode='sow')
>>> # [4] Can automatically perform complete basis set extrapolations
>>> optimize('MP2/cc-pV([D,T]+d)Z')
>>> # [5] Can automatically perform delta corrections that include extrapolations
>>> # even with a user-defined extrapolation formula. See sample inputs named
>>> # cbs-xtpl* for more examples of this input style
>>> optimize("MP2/aug-cc-pv([d,t]+d)z + d:ccsd(t)/cc-pvdz", corl_scheme=myxtplfn_2)
>>> # [6] Get info like geometry, gradient, energy back after an
>>> # optimization fails. Note that the energy and gradient
>>> # correspond to the last optimization cycle, whereas the
>>> # geometry (by default) is the anticipated *next* optimization step.
>>> try:
>>> optimize('hf/cc-pvtz')
>>> except psi4.OptimizationConvergenceError as ex:
>>> next_geom_coords_as_numpy_array = np.asarray(ex.wfn.molecule().geometry())
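>>> # [7] A hedged sketch (not among the original examples): collect the per-step
>>> # energies recorded when return_history is requested
>>> e, history = optimize('scf', return_history=True)
>>> print(history['energy'][-1])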
"""
kwargs = p4util.kwargs_lower(kwargs)
if hasattr(name, '__call__'):
lowername = name
custom_gradient = True
else:
lowername = name.lower()
custom_gradient = False
return_wfn = kwargs.pop('return_wfn', False)
return_history = kwargs.pop('return_history', False)
if return_history:
# Add wfn once the deep copy issues are worked out
step_energies = []
step_gradients = []
step_coordinates = []
# For CBS wrapper, need to set retention on INTCO file
if custom_gradient or ('/' in lowername):
core.IOManager.shared_object().set_specific_retention(1, True)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Optimize: Does not currently support 'bsse_type' arguements")
full_hess_every = core.get_option('OPTKING', 'FULL_HESS_EVERY')
steps_since_last_hessian = 0
if custom_gradient and core.has_option_changed('OPTKING', 'FULL_HESS_EVERY'):
raise ValidationError("Optimize: Does not support custom Hessian's yet.")
else:
hessian_with_method = kwargs.get('hessian_with', lowername)
# are we in sow/reap mode?
opt_mode = kwargs.get('mode', 'continuous').lower()
if opt_mode not in ['continuous', 'sow', 'reap']:
raise ValidationError("""Optimize execution mode '%s' not valid.""" % (opt_mode))
optstash = p4util.OptionsState(
['OPTKING', 'INTRAFRAG_STEP_LIMIT'],
['FINDIF', 'HESSIAN_WRITE'],
['OPTKING', 'CART_HESS_READ'],
['SCF', 'GUESS_PERSIST'], # handle on behalf of cbs()
['SCF', 'GUESS'])
n = kwargs.get('opt_iter', 1)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
# If we are freezing cartesian, do not orient or COM
if core.get_local_option("OPTKING", "FROZEN_CARTESIAN"):
molecule.fix_orientation(True)
molecule.fix_com(True)
molecule.update_geometry()
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
initial_sym = moleculeclone.schoenflies_symbol()
while n <= core.get_option('OPTKING', 'GEOM_MAXITER'):
current_sym = moleculeclone.schoenflies_symbol()
if initial_sym != current_sym:
raise ValidationError("""Point group changed! (%s <-- %s) You should restart """
"""using the last geometry in the output, after """
"""carefully making sure all symmetry-dependent """
"""input, such as DOCC, is correct.""" %
(current_sym, initial_sym))
kwargs['opt_iter'] = n
# Use orbitals from previous iteration as a guess
# set within loop so that can be influenced by fns to optimize (e.g., cbs)
if (n > 1) and (opt_mode == 'continuous') and (not core.get_option('SCF', 'GUESS_PERSIST')):
core.set_local_option('SCF', 'GUESS', 'READ')
# Before computing gradient, save previous molecule and wavefunction if this is an IRC optimization
if (n > 1) and (core.get_option('OPTKING', 'OPT_TYPE') == 'IRC'):
old_thisenergy = core.get_variable('CURRENT ENERGY')
# Compute the gradient
G, wfn = gradient(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
thisenergy = core.get_variable('CURRENT ENERGY')
# above, used to be getting energy as last of energy list from gradient()
# thisenergy below should ultimately be testing on wfn.energy()
# Record optimization steps
# Add wavefunctions later
if return_history:
step_energies.append(thisenergy)
step_coordinates.append(moleculeclone.geometry())
step_gradients.append(G.clone())
# S/R: Quit after getting new displacements or if forming gradient fails
if opt_mode == 'sow':
return (0.0, None)
elif opt_mode == 'reap' and thisenergy == 0.0:
return (0.0, None)
core.set_gradient(G)
# S/R: Move opt data file from last pass into namespace for this pass
if opt_mode == 'reap' and n != 0:
core.IOManager.shared_object().set_specific_retention(1, True)
core.IOManager.shared_object().set_specific_path(1, './')
if 'opt_datafile' in kwargs:
restartfile = kwargs.pop('opt_datafile')
shutil.copy(restartfile, p4util.get_psifile(1))
# opt_func = kwargs.get('opt_func', kwargs.get('func', energy))
# if opt_func.__name__ == 'complete_basis_set':
# core.IOManager.shared_object().set_specific_retention(1, True)
if full_hess_every > -1:
core.set_global_option('HESSIAN_WRITE', True)
# compute Hessian as requested; frequency wipes out gradient so stash it
if ((full_hess_every > -1) and (n == 1)) or (steps_since_last_hessian + 1 == full_hess_every):
G = core.get_gradient() # TODO
core.IOManager.shared_object().set_specific_retention(1, True)
core.IOManager.shared_object().set_specific_path(1, './')
frequencies(hessian_with_method, **kwargs)
steps_since_last_hessian = 0
core.set_gradient(G)
core.set_global_option('CART_HESS_READ', True)
elif (full_hess_every == -1) and core.get_global_option('CART_HESS_READ') and (n == 1):
pass
# Do nothing; user said to read existing hessian once
else:
core.set_global_option('CART_HESS_READ', False)
steps_since_last_hessian += 1
# Take step. communicate to/from/within optking through legacy_molecule
core.set_legacy_molecule(moleculeclone)
optking_rval = core.optking()
moleculeclone = core.get_legacy_molecule()
moleculeclone.update_geometry()
if optking_rval == core.PsiReturnType.EndLoop:
# if this is the end of an IRC run, set wfn, energy, and molecule to that
# of the last optimized IRC point
if core.get_option('OPTKING', 'OPT_TYPE') == 'IRC':
thisenergy = old_thisenergy
print('Optimizer: Optimization complete!')
core.print_out('\n Final optimized geometry and variables:\n')
moleculeclone.print_in_input_format()
# Check if user wants to see the intcos; if so, don't delete them.
if core.get_option('OPTKING', 'INTCOS_GENERATE_EXIT') == False:
if core.get_option('OPTKING', 'KEEP_INTCOS') == False:
core.opt_clean()
# Changing environment to optimized geometry as expected by user
molecule.set_geometry(moleculeclone.geometry())
for postcallback in hooks['optimize']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
core.clean()
# S/R: Clean up opt input file
if opt_mode == 'reap':
with open('OPT-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the gradient() wrapper.\n\n'.encode('utf-8'))
fmaster.write('# Optimization complete!\n\n'.encode('utf-8'))
# Cleanup binary file 1
if custom_gradient or ('/' in lowername):
core.IOManager.shared_object().set_specific_retention(1, False)
optstash.restore()
if return_history:
history = { 'energy' : step_energies ,
'gradient' : step_gradients ,
'coordinates' : step_coordinates,
}
if return_wfn and return_history:
return (thisenergy, wfn, history)
elif return_wfn and not return_history:
return (thisenergy, wfn)
elif return_history and not return_wfn:
return (thisenergy, history)
else:
return thisenergy
elif optking_rval == core.PsiReturnType.Failure:
print('Optimizer: Optimization failed!')
if (core.get_option('OPTKING', 'KEEP_INTCOS') == False):
core.opt_clean()
molecule.set_geometry(moleculeclone.geometry())
core.clean()
optstash.restore()
raise OptimizationConvergenceError("""geometry optimization""", n - 1, wfn)
return thisenergy
core.print_out('\n Structure for next step:\n')
moleculeclone.print_in_input_format()
# S/R: Preserve opt data file for next pass and switch modes to get new displacements
if opt_mode == 'reap':
kwargs['opt_datafile'] = p4util.get_psifile(1)
kwargs['mode'] = 'sow'
n += 1
if core.get_option('OPTKING', 'INTCOS_GENERATE_EXIT') == False:
if core.get_option('OPTKING', 'KEEP_INTCOS') == False:
core.opt_clean()
optstash.restore()
raise OptimizationConvergenceError("""geometry optimization""", n - 1, wfn)
def hessian(name, **kwargs):
r"""Function complementary to :py:func:`~frequency`. Computes force
constants, deciding analytic, finite difference of gradients, or
finite difference of energies.
:returns: :py:class:`~psi4.core.Matrix` |w--w| Total non-mass-weighted electronic Hessian in Hartrees/Bohr/Bohr.
:returns: (:py:class:`~psi4.core.Matrix`, :py:class:`~psi4.core.Wavefunction`) |w--w| Hessian and wavefunction when **return_wfn** specified.
:examples:
>>> # [1] Frequency calculation without thermochemical analysis
>>> hessian('mp3')
>>> # [2] Frequency calc w/o thermo analysis getting the Hessian
>>> # in file, core.Matrix, and np.array forms
>>> set hessian_write on
>>> H, wfn = hessian('ccsd', return_wfn=True)
>>> wfn.hessian().print_out()
>>> np.array(H)
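>>> # [3] A hedged sketch (not among the original examples): force a finite
>>> # difference of analytic gradients and keep the wavefunction
>>> H, wfn = hessian('scf', dertype=1, return_wfn=True)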
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce to CP if bsse kwarg (someday)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Hessian: Cannot specify bsse_type for hessian yet.")
# Figure out what kind of gradient this is
if hasattr(name, '__call__'):
if name.__name__ in ['cbs', 'complete_basis_set']:
gradient_type = 'cbs_wrapper'
else:
# Bounce to name if name is non-CBS function
gradient_type = 'custom_function'
elif '/' in name:
gradient_type = 'cbs_gufunc'
else:
gradient_type = 'conventional'
if gradient_type != 'conventional':
raise ValidationError("Hessian: Does not yet support more advanced input or custom functions.")
lowername = name.lower()
# Check if this is a CBS extrapolation
if "/" in lowername:
return driver_cbs._cbs_gufunc('hessian', lowername, **kwargs)
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
dertype = 2
# Prevent methods that do not have associated energies
if lowername in energy_only_methods:
raise ValidationError("hessian('%s') does not have an associated hessian" % name)
optstash = p4util.OptionsState(
['FINDIF', 'HESSIAN_WRITE'],
['FINDIF', 'FD_PROJECT'],
)
# Allow specification of methods to arbitrary order
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
dertype = _find_derivative_type('hessian', lowername, kwargs.pop('freq_dertype', kwargs.pop('dertype', None)))
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# S/R: Mode of operation- whether finite difference freq run in one job or files farmed out
freq_mode = kwargs.pop('mode', 'continuous').lower()
if freq_mode == 'continuous':
pass
elif freq_mode == 'sow':
if dertype == 2:
raise ValidationError("""Frequency execution mode 'sow' not valid for analytic Hessian calculation.""")
elif freq_mode == 'reap':
freq_linkage = kwargs.get('linkage', None)
if freq_linkage is None:
raise ValidationError("""Frequency execution mode 'reap' requires a linkage option.""")
else:
raise ValidationError("""Frequency execution mode '%s' not valid.""" % (freq_mode))
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash_conv = driver_util._set_convergence_criterion('energy', lowername, 8, 10, 8, 10, 8)
# Select certain irreps
irrep = kwargs.get('irrep', -1)
if irrep == -1:
pass # do all irreps
else:
irrep = driver_util.parse_cotton_irreps(irrep, molecule.schoenflies_symbol())
irrep -= 1 # A1 irrep is externally 1, internally 0
if dertype == 2:
core.print_out("""hessian() switching to finite difference by gradients for partial Hessian calculation.\n""")
dertype = 1
# At stationary point?
if 'ref_gradient' in kwargs:
core.print_out("""hessian() using ref_gradient to assess stationary point.\n""")
G0 = kwargs['ref_gradient']
else:
G0 = gradient(lowername, molecule=molecule, **kwargs)
translations_projection_sound, rotations_projection_sound = _energy_is_invariant(G0)
core.print_out('\n Based on options and gradient (rms={:.2E}), recommend {}projecting translations and {}projecting rotations.\n'.
format(G0.rms(), '' if translations_projection_sound else 'not ',
'' if rotations_projection_sound else 'not '))
if not core.has_option_changed('FINDIF', 'FD_PROJECT'):
core.set_local_option('FINDIF', 'FD_PROJECT', rotations_projection_sound)
# Does an analytic procedure exist for the requested method?
if dertype == 2:
core.print_out("""hessian() will perform analytic frequency computation.\n""")
# We have the desired method. Do it.
wfn = procedures['hessian'][lowername](lowername, molecule=molecule, **kwargs)
wfn.set_gradient(G0)
optstash.restore()
optstash_conv.restore()
# TODO: check that current energy's being set to the right figure when this code is actually used
core.set_variable('CURRENT ENERGY', wfn.energy())
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
elif dertype == 1:
core.print_out("""hessian() will perform frequency computation by finite difference of analytic gradients.\n""")
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
# Obtain list of displacements
displacements = core.fd_geoms_freq_1(moleculeclone, irrep)
moleculeclone.reinterpret_coordentry(False)
moleculeclone.fix_orientation(True)
# Record undisplaced symmetry for projection of displaced point groups
core.set_parent_symmetry(molecule.schoenflies_symbol())
ndisp = len(displacements)
print(""" %d displacements needed.""" % ndisp)
gradients = []
energies = []
# S/R: Write instructions for sow/reap procedure to output file and reap input file
if freq_mode == 'sow':
instructionsO = """\n# The frequency sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructionsO += """# to this output file (which contains no quantum chemical calculations), this job\n"""
instructionsO += """# has produced a number of input files (FREQ-*.in) for individual components\n"""
instructionsO += """# and a single input file (FREQ-master.in) with a frequency(mode='reap') command.\n"""
instructionsO += """# These files may look very peculiar since they contain processed and pickled python\n"""
instructionsO += """# rather than normal input. Follow the instructions below (repeated in FREQ-master.in)\n"""
instructionsO += """# to continue.\n#\n"""
instructionsO += """# Alternatively, a single-job execution of the hessian may be accessed through\n"""
instructionsO += """# the frequency wrapper option mode='continuous'.\n#\n"""
core.print_out(instructionsO)
instructionsM = """\n# Follow the instructions below to carry out this frequency computation.\n#\n"""
instructionsM += """# (1) Run all of the FREQ-*.in input files on any variety of computer architecture.\n"""
instructionsM += """# The output file names must be as given below (these are the defaults when executed\n"""
instructionsM += """# as `psi4 FREQ-1.in`, etc.).\n#\n"""
for rgt in range(ndisp):
pre = 'FREQ-' + str(rgt + 1)
instructionsM += """# psi4 -i %-27s -o %-27s\n""" % (pre + '.in', pre + '.out')
instructionsM += """#\n# (2) Gather all the resulting output files in a directory. Place input file\n"""
instructionsM += """# FREQ-master.in into that directory and run it. The job will be minimal in\n"""
instructionsM += """# length and give summary results for the frequency computation in its output file.\n#\n"""
instructionsM += """# psi4 -i %-27s -o %-27s\n#\n\n""" % ('FREQ-master.in', 'FREQ-master.out')
with open('FREQ-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n'.encode('utf-8'))
fmaster.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
fmaster.write(p4util.format_options_for_input(moleculeclone, **kwargs))
p4util.format_kwargs_for_input(fmaster, lmode=2, return_wfn=True, freq_dertype=1, **kwargs)
fmaster.write(("""retE, retwfn = %s('%s', **kwargs)\n\n""" % (frequency.__name__, lowername)).encode('utf-8'))
fmaster.write(instructionsM.encode('utf-8'))
core.print_out(instructionsM)
for n, displacement in enumerate(displacements):
rfile = 'FREQ-%s' % (n + 1)
# Build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Hessian Computation: Gradient Displacement %d ')\n""" % (n + 1)
banners += """core.print_out('\\n')\n\n"""
if freq_mode == 'continuous':
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n + 1, ndisp))
print(""" %d""" % (n + 1), end=('\n' if (n + 1 == ndisp) else ''))
sys.stdout.flush()
# Load in displacement into the active molecule (xyz coordinates only)
moleculeclone.set_geometry(displacement)
# Perform the gradient calculation
G, wfn = gradient(lowername, molecule=moleculeclone, return_wfn=True, **kwargs)
gradients.append(wfn.gradient())
energies.append(core.get_variable('CURRENT ENERGY'))
# clean may be necessary when changing irreps of displacements
core.clean()
# S/R: Write each displaced geometry to an input file
elif freq_mode == 'sow':
moleculeclone.set_geometry(displacement)
# S/R: Prepare molecule, options, kwargs, function call and energy save
# forcexyz in molecule writer S/R enforcement of !reinterpret_coordentry above
with open('%s.in' % (rfile), 'wb') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n')
freagent.write(p4util.format_molecule_for_input(moleculeclone, forcexyz=True).encode('utf-8'))
freagent.write(p4util.format_options_for_input(moleculeclone, **kwargs).encode('utf-8'))
kwargs['return_wfn'] = True
p4util.format_kwargs_for_input(freagent, **kwargs)
freagent.write("""G, wfn = %s('%s', **kwargs)\n\n""" % (gradient.__name__, lowername))
freagent.write("""core.print_out('\\nHESSIAN RESULT: computation %d for item %d """ % (os.getpid(), n + 1))
freagent.write("""yields electronic gradient %r\\n' % (p4util.mat2arr(wfn.gradient())))\n\n""")
freagent.write("""core.print_out('\\nHESSIAN RESULT: computation %d for item %d """ % (os.getpid(), n + 1))
freagent.write("""yields electronic energy %20.12f\\n' % (get_variable('CURRENT ENERGY')))\n\n""")
# S/R: Read energy from each displaced geometry output file and save in energies array
elif freq_mode == 'reap':
exec(banners)
core.set_variable('NUCLEAR REPULSION ENERGY', moleculeclone.nuclear_repulsion_energy())
pygrad = p4util.extract_sowreap_from_output(rfile, 'HESSIAN', n, freq_linkage, True, label='electronic gradient')
p4mat = core.Matrix.from_list(pygrad)
p4mat.print_out()
gradients.append(p4mat)
energies.append(p4util.extract_sowreap_from_output(rfile, 'HESSIAN', n, freq_linkage, True))
# S/R: Quit sow after writing files. Initialize skeleton wfn to receive grad for reap
if freq_mode == 'sow':
optstash.restore()
optstash_conv.restore()
if return_wfn:
return (None, None)
else:
return None
elif freq_mode == 'reap':
wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
# Assemble Hessian from gradients
# Final disp is undisp, so wfn has mol, G, H general to freq calc
H = core.fd_freq_1(molecule, gradients, irrep) # TODO or moleculeclone?
wfn.set_hessian(H)
wfn.set_gradient(G0)
wfn.set_frequencies(core.get_frequencies())
# The last item in the list is the reference energy, return it
core.set_variable('CURRENT ENERGY', energies[-1])
core.set_parent_symmetry('')
optstash.restore()
optstash_conv.restore()
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
else:
core.print_out("""hessian() will perform frequency computation by finite difference of analytic energies.\n""")
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash.restore()
optstash_conv.restore()
optstash_conv = driver_util._set_convergence_criterion('energy', lowername, 10, 11, 10, 11, 10)
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
# Obtain list of displacements
displacements = core.fd_geoms_freq_0(moleculeclone, irrep)
moleculeclone.fix_orientation(True)
moleculeclone.reinterpret_coordentry(False)
# Record undisplaced symmetry for projection of displaced point groups
core.set_parent_symmetry(molecule.schoenflies_symbol())
ndisp = len(displacements)
# This version is pretty dependent on the reference geometry being last (as it is now)
print(' %d displacements needed.' % ndisp)
energies = []
# S/R: Write instructions for sow/reap procedure to output file and reap input file
if freq_mode == 'sow':
instructionsO = """\n# The frequency sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructionsO += """# to this output file (which contains no quantum chemical calculations), this job\n"""
instructionsO += """# has produced a number of input files (FREQ-*.in) for individual components\n"""
instructionsO += """# and a single input file (FREQ-master.in) with a frequency(mode='reap') command.\n"""
instructionsO += """# These files may look very peculiar since they contain processed and pickled python\n"""
instructionsO += """# rather than normal input. Follow the instructions below (repeated in FREQ-master.in)\n"""
instructionsO += """# to continue.\n#\n"""
instructionsO += """# Alternatively, a single-job execution of the hessian may be accessed through\n"""
instructionsO += """# the frequency wrapper option mode='continuous'.\n#\n"""
core.print_out(instructionsO)
instructionsM = """\n# Follow the instructions below to carry out this frequency computation.\n#\n"""
instructionsM += """# (1) Run all of the FREQ-*.in input files on any variety of computer architecture.\n"""
instructionsM += """# The output file names must be as given below (these are the defaults when executed\n"""
instructionsM += """# as `psi4 FREQ-1.in`, etc.).\n#\n"""
for rgt in range(ndisp):
pre = 'FREQ-' + str(rgt + 1)
instructionsM += """# psi4 -i %-27s -o %-27s\n""" % (pre + '.in', pre + '.out')
instructionsM += """#\n# (2) Gather all the resulting output files in a directory. Place input file\n"""
instructionsM += """# FREQ-master.in into that directory and run it. The job will be minimal in\n"""
instructionsM += """# length and give summary results for the frequency computation in its output file.\n#\n"""
instructionsM += """# psi4 -i %-27s -o %-27s\n#\n\n""" % ('FREQ-master.in', 'FREQ-master.out')
with open('FREQ-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n'.encode('utf-8'))
fmaster.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
fmaster.write(p4util.format_options_for_input(moleculeclone, **kwargs))
p4util.format_kwargs_for_input(fmaster, lmode=2, return_wfn=True, freq_dertype=0, **kwargs)
fmaster.write(("""retE, retwfn = %s('%s', **kwargs)\n\n""" % (frequency.__name__, lowername)).encode('utf-8'))
fmaster.write(instructionsM.encode('utf-8'))
core.print_out(instructionsM)
for n, displacement in enumerate(displacements):
rfile = 'FREQ-%s' % (n + 1)
# Build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Hessian Computation: Energy Displacement %d ')\n""" % (n + 1)
banners += """core.print_out('\\n')\n\n"""
if freq_mode == 'continuous':
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n + 1, ndisp))
print(""" %d""" % (n + 1), end=('\n' if (n + 1 == ndisp) else ''))
sys.stdout.flush()
# Load in displacement into the active molecule
moleculeclone.set_geometry(displacement)
# Perform the energy calculation
E, wfn = energy(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
energies.append(core.get_variable('CURRENT ENERGY'))
# clean may be necessary when changing irreps of displacements
core.clean()
# S/R: Write each displaced geometry to an input file
elif freq_mode == 'sow':
moleculeclone.set_geometry(displacement)
# S/R: Prepare molecule, options, kwargs, function call and energy save
with open('%s.in' % (rfile), 'wb') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n')
freagent.write(p4util.format_molecule_for_input(moleculeclone, forcexyz=True).encode('utf-8'))
freagent.write(p4util.format_options_for_input(moleculeclone, **kwargs).encode('utf-8'))
p4util.format_kwargs_for_input(freagent, **kwargs)
freagent.write("""electronic_energy = %s('%s', **kwargs)\n\n""" % (energy.__name__, lowername))
freagent.write("""core.print_out('\\nHESSIAN RESULT: computation %d for item %d """ % (os.getpid(), n + 1))
freagent.write("""yields electronic energy %20.12f\\n' % (electronic_energy))\n\n""")
# S/R: Read energy from each displaced geometry output file and save in energies array
elif freq_mode == 'reap':
exec(banners)
core.set_variable('NUCLEAR REPULSION ENERGY', moleculeclone.nuclear_repulsion_energy())
energies.append(p4util.extract_sowreap_from_output(rfile, 'HESSIAN', n, freq_linkage, True))
# S/R: Quit sow after writing files. Initialize skeleton wfn to receive grad for reap
if freq_mode == 'sow':
optstash.restore()
optstash_conv.restore()
if return_wfn:
return (None, None)
else:
return None
elif freq_mode == 'reap':
# core.set_variable('CURRENT ENERGY', energies[-1])
wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
# Assemble Hessian from energies
H = core.fd_freq_0(molecule, energies, irrep)
wfn.set_hessian(H)
wfn.set_gradient(G0)
wfn.set_frequencies(core.get_frequencies())
# The last item in the list is the reference energy, return it
core.set_variable('CURRENT ENERGY', energies[-1])
core.set_parent_symmetry('')
optstash.restore()
optstash_conv.restore()
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
def frequency(name, **kwargs):
r"""Function to compute harmonic vibrational frequencies.
:aliases: frequencies(), freq()
:returns: *float* |w--w| Total electronic energy in Hartrees.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:type name: string
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
Arrays of frequencies and the Hessian can be accessed through the wavefunction.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``gradient`` |dr| || ``energy`` || ``cbs``
Indicates the type of calculation to be performed on the molecule.
The default dertype accesses ``'gradient'`` or ``'energy'``, while
``'cbs'`` performs a multistage finite difference calculation.
If a nested series of python functions is intended (see :ref:`sec:intercalls`),
use keyword ``freq_func`` instead of ``func``.
:type mode: string
:param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``
For a finite difference of energies or gradients frequency, indicates
whether the calculations required to complete the frequency are to be run
in one file (``'continuous'``) or are to be farmed out in an
embarrassingly parallel fashion (``'sow'``/``'reap'``). For the latter,
run an initial job with ``'sow'`` and follow instructions in its output file.
For maximum flexibility, ``return_wfn`` is always on in ``'reap'`` mode.
:type dertype: :ref:`dertype <op_py_dertype>`
:param dertype: |dl| ``'hessian'`` |dr| || ``'gradient'`` || ``'energy'``
Indicates whether analytic (if available- they're not), finite
difference of gradients (if available) or finite difference of
energies is to be performed.
:type irrep: int or string
:param irrep: |dl| ``-1`` |dr| || ``1`` || ``'b2'`` || ``'App'`` || etc.
Indicates which symmetry block (:ref:`Cotton <table:irrepOrdering>` ordering) of vibrational
frequencies to be computed. ``1``, ``'1'``, or ``'a1'`` represents
:math:`a_1`, requesting only the totally symmetric modes.
``-1`` indicates a full frequency calculation.
.. note:: Analytic hessians are only available for RHF. For all other methods, frequencies will
proceed through finite differences according to availability of gradients or energies.
.. _`table:freq_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| scf | Hartree--Fock (HF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
:examples:
>>> # [1] Frequency calculation for all modes through highest available derivatives
>>> frequency('ccsd')
>>> # [2] Frequency calculation for b2 modes through finite difference of gradients
>>> # printing lowest mode frequency to screen and Hessian to output
>>> E, wfn = frequencies('scf', dertype=1, irrep=4, return_wfn=True)
>>> print wfn.frequencies().get(0, 0)
>>> wfn.hessian().print_out()
>>> # [3] Frequency calculation at default conditions and Hessian reuse at STP
>>> E, wfn = freq('mp2', return_wfn=True)
>>> set t 273.15
>>> set p 100000
>>> thermo(wfn, wfn.frequencies())
>>> # [4] Opt+Freq, skipping the gradient recalc at the start of the Hessian
>>> e, wfn = optimize('hf', return_wfn=True)
>>> frequencies('hf', ref_gradient=wfn.gradient())
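>>> # [5] A hedged sketch (not among the original examples): totally symmetric
>>> # (a1) modes only, by finite difference of energies
>>> frequency('scf', dertype='energy', irrep=1)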
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce (someday) if name is function
if hasattr(name, '__call__'):
raise ValidationError("Frequency: Cannot use custom function")
lowername = name.lower()
if "/" in lowername:
return driver_cbs._cbs_gufunc(frequency, name, ptype='frequency', **kwargs)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Frequency: Does not currently support 'bsse_type' arguments")
return_wfn = kwargs.pop('return_wfn', False)
# are we in sow/reap mode?
freq_mode = kwargs.get('mode', 'continuous').lower()
if freq_mode not in ['continuous', 'sow', 'reap']:
raise ValidationError("""Frequency execution mode '%s' not valid.""" % (freq_mode))
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# Compute the hessian
H, wfn = hessian(lowername, return_wfn=True, molecule=molecule, **kwargs)
# S/R: Quit after getting new displacements
if freq_mode == 'sow':
return 0.0
# Project final frequencies?
translations_projection_sound, rotations_projection_sound = _energy_is_invariant(wfn.gradient())
project_trans = kwargs.get('project_trans', translations_projection_sound)
project_rot = kwargs.get('project_rot', rotations_projection_sound)
irrep = kwargs.get('irrep', None)
vibinfo = vibanal_wfn(wfn, irrep=irrep, project_trans=project_trans, project_rot=project_rot)
vibonly = qcdb.vib.filter_nonvib(vibinfo)
wfn.set_frequencies(core.Vector.from_array(qcdb.vib.filter_omega_to_real(vibonly['omega'].data)))
wfn.frequency_analysis = vibinfo
for postcallback in hooks['frequency']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
if return_wfn:
return (core.get_variable('CURRENT ENERGY'), wfn)
else:
return core.get_variable('CURRENT ENERGY')
def vibanal_wfn(wfn, hess=None, irrep=None, molecule=None, project_trans=True, project_rot=True):
if hess is None:
nmwhess = np.asarray(wfn.hessian())
else:
nmwhess = hess
mol = wfn.molecule()
geom = np.asarray(mol.geometry())
symbols = [mol.symbol(at) for at in range(mol.natom())]
vibrec = {'molecule': mol.to_dict(np_out=False),
'hessian': nmwhess.tolist()}
if molecule is not None:
molecule.update_geometry()
if mol.natom() != molecule.natom():
raise ValidationError('Impostor molecule trying to be analyzed! natom {} != {}'.format(mol.natom(), molecule.natom()))
if abs(mol.nuclear_repulsion_energy() - molecule.nuclear_repulsion_energy()) > 1.e-6:
raise ValidationError('Impostor molecule trying to be analyzed! NRE {} != {}'.format(mol.nuclear_repulsion_energy(), molecule.nuclear_repulsion_energy()))
if not np.allclose(np.asarray(mol.geometry()), np.asarray(molecule.geometry()), atol=1.e-6):
core.print_out('Warning: geometry center/orientation mismatch. Normal modes may not be in expected coordinate system.')
# raise ValidationError('Impostor molecule trying to be analyzed! geometry\n{}\n !=\n{}'.format(
# np.asarray(mol.geometry()), np.asarray(molecule.geometry())))
mol = molecule
m = np.asarray([mol.mass(at) for at in range(mol.natom())])
irrep_labels = mol.irrep_labels()
vibinfo, vibtext = qcdb.vib.harmonic_analysis(nmwhess, geom, m, wfn.basisset(), irrep_labels,
project_trans=project_trans, project_rot=project_rot)
vibrec.update({k: qca.to_dict() for k, qca in vibinfo.items()})
core.print_out(vibtext)
core.print_out(qcdb.vib.print_vibs(vibinfo, shortlong=True, normco='x', atom_lbl=symbols))
if core.has_option_changed('THERMO', 'ROTATIONAL_SYMMETRY_NUMBER'):
rsn = core.get_option('THERMO', 'ROTATIONAL_SYMMETRY_NUMBER')
else:
rsn = mol.rotational_symmetry_number()
if irrep is None:
therminfo, thermtext = qcdb.vib.thermo(vibinfo,
T=core.get_option("THERMO", "T"), # 298.15 [K]
P=core.get_option("THERMO", "P"), # 101325. [Pa]
multiplicity=mol.multiplicity(),
molecular_mass=np.sum(m),
sigma=rsn,
rotor_type=mol.rotor_type(),
rot_const=np.asarray(mol.rotational_constants()),
E0=core.get_variable('CURRENT ENERGY')) # someday, wfn.energy()
vibrec.update({k: qca.to_dict() for k, qca in therminfo.items()})
core.set_variable("ZPVE", therminfo['ZPE_corr'].data)
core.set_variable("THERMAL ENERGY CORRECTION", therminfo['E_corr'].data)
core.set_variable("ENTHALPY CORRECTION", therminfo['H_corr'].data)
core.set_variable("GIBBS FREE ENERGY CORRECTION", therminfo['G_corr'].data)
core.set_variable("ZERO K ENTHALPHY", therminfo['ZPE_tot'].data)
core.set_variable("THERMAL ENERGY", therminfo['E_tot'].data)
core.set_variable("ENTHALPY", therminfo['H_tot'].data)
core.set_variable("GIBBS FREE ENERGY", therminfo['G_tot'].data)
core.print_out(thermtext)
else:
core.print_out(' Thermochemical analysis skipped for partial frequency calculation.\n')
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
filename = core.get_writer_file_prefix(mol.name()) + ".vibrec"
with open(filename, 'w') as handle:
json.dump(vibrec, handle, sort_keys=True, indent=4)
if core.get_option('FINDIF', 'NORMAL_MODES_WRITE'):
filename = core.get_writer_file_prefix(mol.name()) + ".molden_normal_modes"
with open(filename, 'w') as handle:
handle.write(qcdb.vib.print_molden_vibs(vibinfo, symbols, geom, standalone=True))
return vibinfo
def _hessian_write(wfn):
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".hess"
with open(filename, 'wb') as handle:
qcdb.hessparse.to_string(np.asarray(wfn.hessian()), handle, dtype='psi4')
def gdma(wfn, datafile=""):
"""Function to use wavefunction information in *wfn* and, if specified,
additional commands in *filename* to run GDMA analysis.
.. include:: ../autodoc_abbr_options_c.rst
.. versionadded:: 0.6
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate DMA analysis
:type datafile: string
:param datafile: optional control file (see GDMA manual) to perform more complicated DMA
analyses. If this option is used, the File keyword must be set to read
a filename.fchk, where filename is provided by |globals__writer_file_label| .
:examples:
>>> # [1] DMA analysis from MP2 wavefunction. N.B. gradient must be requested to generate MP2 density.
>>> grad, wfn = gradient('mp2', return_wfn=True)
>>> gdma(wfn)
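>>> # [2] A hedged sketch (not among the original examples): DMA analysis driven by
>>> # a user-supplied GDMA control file ('my_dma.dma' is a hypothetical file name)
>>> grad, wfn = gradient('mp2', return_wfn=True)
>>> gdma(wfn, datafile='my_dma.dma')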
"""
# Start by writing a G* checkpoint file, for the GDMA code to read in
fw = core.FCHKWriter(wfn)
molname = wfn.molecule().name()
prefix = core.get_writer_file_prefix(molname)
fchkfile = prefix + '.fchk'
fw.write(fchkfile)
if datafile:
commands = datafile
else:
densname = wfn.name()
if densname == "DFT":
densname = "SCF"
commands = 'psi4_dma_datafile.dma'
radii = core.get_option('GDMA', 'GDMA_RADIUS')
origin = core.get_option('GDMA', 'GDMA_ORIGIN')
with open(commands, 'w') as f:
f.write("File %s Density %s\n" % (fchkfile, densname))
f.write("Angstrom\n")
f.write("%s\n" % core.get_option('GDMA', 'GDMA_MULTIPOLE_UNITS'))
f.write("Multipoles\n")
if origin:
try:
f.write("Origin %f %f %f\n" % (float(origin[0]), float(origin[1]), float(origin[2])))
except:
raise ValidationError("The GDMA origin array should contain three entries: x, y, and z.")
f.write("Switch %f\n" % core.get_option('GDMA', 'GDMA_SWITCH'))
if radii:
f.write("Radius %s\n" % " ".join([str(r) for r in radii]))
f.write("Limit %d\n" % core.get_option('GDMA', 'GDMA_LIMIT'))
f.write("Start\n")
f.write("Finish\n")
core.run_gdma(wfn, commands)
os.remove(fchkfile)
# If we generated the DMA control file, we should clean up here
if not datafile:
os.remove(commands)
def fchk(wfn, filename):
"""Function to write wavefunction information in *wfn* to *filename* in
Gaussian FCHK format.
.. versionadded:: 0.6
:returns: None
:type filename: string
:param filename: destination file name for FCHK file
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate fchk file
:examples:
>>> # [1] FCHK file for DFT calculation
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> fchk(wfn, 'mycalc.fchk')
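>>> # [2] A hedged sketch (not among the original examples): FCHK file from an MP2
>>> # run; the gradient call ensures the correlated density is built
>>> grad, wfn = gradient('mp2', return_wfn=True)
>>> fchk(wfn, 'mp2calc.fchk')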
"""
fw = core.FCHKWriter(wfn)
fw.write(filename)
def molden(wfn, filename=None, density_a=None, density_b=None, dovirtual=None):
"""Function to write wavefunction information in *wfn* to *filename* in
molden format. Will write natural orbitals from *density* (MO basis) if supplied.
Warning! Most post-SCF Wavefunctions do not build the density as this is often
much more costly than the energy. In addition, the Wavefunction density attributes
(Da and Db) return the SO density and must be transformed to the MO basis
to use with this function.
.. versionadded:: 0.5
*wfn* parameter passed explicitly
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate cube files
:type filename: string
:param filename: destination file name for MOLDEN file (optional)
:type density_a: :py:class:`~psi4.core.Matrix`
:param density_a: density in the MO basis to build alpha NO's from (optional)
:type density_b: :py:class:`~psi4.core.Matrix`
:param density_b: density in the MO basis to build beta NO's from, assumes restricted if not supplied (optional)
:type dovirtual: bool
:param dovirtual: whether to write all the MOs to the MOLDEN file (true) or discard the unoccupied MOs; not valid for NOs (false) (optional)
:examples:
>>> # [1] Molden file for DFT calculation
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> molden(wfn, 'mycalc.molden')
>>> # [2] Molden file for CI/MCSCF computation using NO roots
>>> E, wfn = energy('ci', return_wfn=True)
>>> molden(wfn, 'no_root1.molden', density_a=wfn.opdm(0, 0, "A", True))
>>> # [3] The following does NOT work, please see below
>>> E, wfn = energy('ccsd', return_wfn=True)
>>> molden(wfn, 'ccsd_no.molden', density_a=wfn.Da())
>>> # [4] This WILL work, note the transformation of Da (SO->MO)
>>> E, wfn = properties('ccsd', properties=['dipole'], return_wfn=True)
>>> Da_so = wfn.Da()
>>> Da_mo = Matrix.triplet(wfn.Ca(), Da_so, wfn.Ca(), True, False, False)
>>> molden(wfn, 'ccsd_no.molden', density_a=Da_mo)
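>>> # [5] A hedged sketch (not among the original examples): write only the
>>> # occupied orbitals by disabling virtual-orbital output
>>> E, wfn = energy('scf', return_wfn=True)
>>> molden(wfn, 'occupied_only.molden', dovirtual=False)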
"""
if filename is None:
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".molden"
if dovirtual is None:
dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
else:
dovirt = dovirtual
if density_a:
nmopi = wfn.nmopi()
nsopi = wfn.nsopi()
NO_Ra = core.Matrix("NO Alpha Rotation Matrix", nmopi, nmopi)
NO_occa = core.Vector(nmopi)
density_a.diagonalize(NO_Ra, NO_occa, core.DiagonalizeOrder.Descending)
NO_Ca = core.Matrix("Ca Natural Orbitals", nsopi, nmopi)
NO_Ca.gemm(False, False, 1.0, wfn.Ca(), NO_Ra, 0)
if density_b:
NO_Rb = core.Matrix("NO Beta Rotation Matrix", nmopi, nmopi)
NO_occb = core.Vector(nmopi)
density_b.diagonalize(NO_Rb, NO_occb, core.DiagonalizeOrder.Descending)
NO_Cb = core.Matrix("Cb Natural Orbitals", nsopi, nmopi)
NO_Cb.gemm(False, False, 1.0, wfn.Cb(), NO_Rb, 0)
else:
NO_occb = NO_occa
NO_Cb = NO_Ca
mw = core.MoldenWriter(wfn)
mw.write(filename, NO_Ca, NO_Cb, NO_occa, NO_occb, NO_occa, NO_occb, dovirt)
else:
try:
occa = wfn.occupation_a()
occb = wfn.occupation_b()
except AttributeError:
core.print_out("\n!Molden warning: This wavefunction does not have occupation numbers.\n"
"Writing zero's for occupation numbers\n\n")
occa = core.Vector(wfn.nmopi())
occb = core.Vector(wfn.nmopi())
mw = core.MoldenWriter(wfn)
mw.write(filename, wfn.Ca(), wfn.Cb(), wfn.epsilon_a(), wfn.epsilon_b(), occa, occb, dovirt)
# Aliases
opt = optimize
freq = frequency
frequencies = frequency
prop = properties
|
lgpl-3.0
| -2,847,176,584,611,929,000
| 56.55871
| 166
| 0.476895
| false
| 4.430053
| false
| false
| false
|
icgc-dcc/egasub
|
tests/test_dataset.py
|
1
|
1522
|
from egasub.ega.entities.dataset import Dataset
from egasub.ega.entities.dataset_link import DatasetLink
from egasub.ega.entities.attribute import Attribute
links = [DatasetLink('label 1','url1'),DatasetLink('label 2','url2')]
attributes = [Attribute('The tag 1','The value 1','an unit'),Attribute('The tag 2','The value 2','an unit')]
dataset = Dataset('an alias',[3,4,5],3,[6,1,4],[8,21,4],'a title',links,attributes,'dataset description',None,'ega_accession_id')
def test_dataset_type_ids():
assert [3,4,5] == dataset.dataset_type_ids
def test_policy_id():
assert 3 == dataset.policy_id
def test_runs_references():
assert [6,1,4] == dataset.runs_references
def test_analysis_references():
assert [8,21,4] == dataset.analysis_references
def test_dataset_links():
assert links == dataset.dataset_links
def test_attributes():
assert attributes == dataset.attributes
def test_to_dict():
assert cmp(
{
'title' : 'a title',
'datasetTypeIds':[3,4,5],
'policyId':3,
'runsReferences' : [6,1,4],
'analysisReferences' : [8,21,4],
'datasetLinks' : map(lambda dataset_link: dataset_link.to_dict(), links),
'attributes' : map(lambda attribute: attribute.to_dict(), attributes),
'alias' : 'an alias',
'description': 'dataset description',
'egaAccessionId': 'ega_accession_id'
}, dataset.to_dict()) == 0
def test_alias():
assert 'an alias' == dataset.alias
|
gpl-3.0
| -2,194,082,586,187,153,400
| 32.844444
| 129
| 0.634691
| false
| 3.412556
| true
| false
| false
|
nisavid/spruce-collections
|
spruce/collections/_exc.py
|
1
|
1237
|
"""Exceptions"""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import exceptions as _py_exc
class Exception(_py_exc.Exception):
pass
class Error(RuntimeError, Exception):
pass
class UnsupportedUniversalSetOperation(Error):
"""
A finite set operation was attempted on a universal set
:param operation:
The attempted operation.
:type operation: :obj:`str`
:param message:
A message that describes the error.
:type message: :obj:`str` or null
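Illustrative sketch (not from the original source; ``uni`` stands in for a
hypothetical universal-set object):
>>> str(UnsupportedUniversalSetOperation('len', uni, 'cardinality is infinite'))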
"""
def __init__(self, operation, set, message=None, *args):
super(UnsupportedUniversalSetOperation, self)\
.__init__(operation, set, message, *args)
self._message = message
self._operation = operation
self._set = set
def __str__(self):
message = '{} is unsupported by the universal set {!r}'\
.format(self.operation, self.set)
if self.message:
message += ': ' + self.message
return message
@property
def message(self):
return self._message
@property
def operation(self):
return self._operation
@property
def set(self):
return self._set
|
lgpl-3.0
| 4,857,376,909,560,219,000
| 21.089286
| 64
| 0.607114
| false
| 4.295139
| false
| false
| false
|
ScottWales/threddsclient
|
threddsclient/nodes.py
|
1
|
6231
|
"""
Python objects for modelling a Thredds server
"""
from bs4 import BeautifulSoup as BSoup
import urlparse
from .utils import size_in_bytes
import logging
logger = logging.getLogger(__name__)
FILE_SERVICE = "HTTPServer"
OPENDAP_SERVICE = "OPENDAP"
WMS_SERVICE = "WMS"
WCS_SERVICE = "WCS"
class Node(object):
"""
Common items to all nodes
"""
def __init__(self, soup, catalog):
self.soup = soup
self.catalog = catalog
self.name = soup.get('name')
self.content_type = None
self.bytes = None
self.modified = None
def __repr__(self):
return "<Node name: {0.name}, content type: {0.content_type}>".format(self)
class Service(Node):
"""
A Thredds service
"""
def __init__(self, soup, catalog):
Node.__init__(self, soup, catalog)
self.base = soup.get('base')
self.url = urlparse.urljoin(self.catalog.url, self.base)
self.service_type = soup.get('serviceType')
self.content_type = "application/service"
self.services = [Service(s, self.catalog) for s in soup.find_all('service', recursive=False)]
class CatalogRef(Node):
"""
A reference to a different Thredds catalog
"""
def __init__(self, soup, catalog):
Node.__init__(self, soup, catalog)
self.title = soup.get('xlink:title')
self.name = self.title
self.href = soup.get('xlink:href')
self.url = urlparse.urljoin(self.catalog.url, self.href)
self.content_type = "application/directory"
def follow(self):
from .client import read_url
return read_url(self.url)
class Dataset(Node):
"""
Abstract dataset class
"""
def __init__(self, soup, catalog):
Node.__init__(self, soup, catalog)
def is_collection(self):
return False
@property
def ID(self):
return self.soup.get('ID')
@property
def url(self):
return "{0}?dataset={1}".format(self.catalog.url, self.ID)
@property
def authority(self):
authority = None
if self.soup.get('authority'):
authority = self.soup.get('authority')
elif self.soup.metadata:
authority = self.soup.metadata.authority
elif self.soup.parent.metadata:
authority = self.soup.parent.metadata.authority
return authority
@property
def service_name(self):
service_name = None
if self.soup.get('servicename'):
service_name = self.soup.get('servicename')
elif self.soup.metadata:
if self.soup.metadata.serviceName:
service_name = self.soup.metadata.serviceName.text
elif self.soup.parent.metadata:
if self.soup.parent.metadata.serviceName:
service_name = self.soup.parent.metadata.serviceName.text
return service_name
@property
def data_type(self):
data_type = None
if self.soup.get('datatype'):
data_type = self.soup.get('datatype')
elif self.soup.metadata:
if self.soup.metadata.dataType:
data_type = self.soup.metadata.dataType.text
elif self.soup.parent.metadata:
if self.soup.parent.metadata.dataType:
data_type = self.soup.parent.metadata.dataType.text
return data_type
@property
def data_format_type(self):
data_format_type = None
if self.soup.dataFormatType:
data_format_type = self.soup.dataFormatType.text
elif self.soup.metadata:
if self.soup.metadata.dataFormatType:
data_format_type = self.soup.metadata.dataFormatType.text
elif self.soup.parent.metadata:
if self.soup.parent.metadata.dataFormatType:
data_format_type = self.soup.parent.metadata.dataFormatType.text
return data_format_type
class CollectionDataset(Dataset):
"""
A container for other datasets
"""
def __init__(self, soup, catalog):
Dataset.__init__(self, soup, catalog)
self.collection_type = soup.get('collectionType')
self.harvest = self._harvest(soup)
# TODO: add attributes for harvesting: contributor, keyword, publisher, summary, rights, ...
# see http://www.unidata.ucar.edu/software/thredds/current/tds/tutorial/CatalogPrimer.html#Describing_datasets
self.content_type = "application/directory"
from .catalog import find_datasets
self.datasets = find_datasets(soup, self.catalog)
from .catalog import find_references
self.references = find_references(soup, self.catalog)
def is_collection(self):
return True
@staticmethod
def _harvest(soup):
return soup.get('harvest', 'false') == 'true'
class DirectDataset(Dataset):
"""
A reference to a data file
"""
def __init__(self, soup, catalog):
Dataset.__init__(self, soup, catalog)
self.url_path = soup.get('urlPath')
self.content_type = "application/netcdf"
self.modified = self._modified(soup)
self.bytes = self._bytes(soup)
def access_url(self, service_type=FILE_SERVICE):
url = None
for service in self.catalog.get_services(self.service_name):
if service.service_type == service_type:
url = urlparse.urljoin(service.url, self.url_path)
break
return url
def download_url(self):
return self.access_url(FILE_SERVICE)
def opendap_url(self):
return self.access_url(OPENDAP_SERVICE)
def wms_url(self):
return self.access_url(WMS_SERVICE)
@staticmethod
def _modified(soup):
modified = None
if soup.date:
if soup.date.get('type') == 'modified':
modified = soup.date.text
return modified
@staticmethod
def _bytes(soup):
size = None
if soup.dataSize:
try:
datasize = float(soup.dataSize.text)
units = soup.dataSize.get('units')
size = size_in_bytes(datasize, units)
except:
logger.exception("dataset size conversion failed")
return size
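# A hedged usage sketch, not part of the original module: walking a catalog's
# datasets and printing download URLs. It assumes the client helper `read_url`
# (imported from .client inside CatalogRef.follow above) is importable at the
# package level, and both the catalog URL and the `datasets` attribute layout of
# the returned catalog are illustrative assumptions.
#
#     from threddsclient import read_url
#     cat = read_url('http://example.com/thredds/catalog.xml')
#     for ds in cat.datasets:
#         if not ds.is_collection():
#             print(ds.name, ds.download_url())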
|
apache-2.0
| -6,310,311,489,160,708,000
| 29.694581
| 118
| 0.606965
| false
| 3.958704
| false
| false
| false
|
Eric89GXL/sphinx-gallery
|
sphinx_gallery/backreferences.py
|
1
|
10042
|
# -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
Backreferences Generator
========================
Parses example file code in order to keep track of used functions
"""
from __future__ import print_function, unicode_literals
import ast
import codecs
import collections
from html import escape
import os
import re
import warnings
from . import sphinx_compatibility
from .scrapers import _find_image_ext
from .utils import _replace_md5
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code.
Only retains names from imported modules.
"""
def __init__(self, global_variables=None):
super(NameFinder, self).__init__()
self.imported_names = {}
self.global_variables = global_variables or {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
class_attr = False
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name, class_attr
elif local_name in self.global_variables:
obj = self.global_variables[local_name]
if remainder and remainder[0] == '.': # maybe meth or attr
method = [remainder[1:]]
class_attr = True
else:
method = []
# Recurse through all levels of bases
classes = [obj.__class__]
offset = 0
while offset < len(classes):
for base in classes[offset].__bases__:
if base not in classes:
classes.append(base)
offset += 1
for cc in classes:
module = cc.__module__.split('.')
class_name = cc.__name__
# a.b.C.meth could be documented as a.C.meth,
# so go down the list
for depth in range(len(module), 0, -1):
full_name = '.'.join(
module[:depth] + [class_name] + method)
yield name, full_name, class_attr
def _from_import(a, b):
imp_line = 'from %s import %s' % (a, b)
scope = dict()
with warnings.catch_warnings(record=True): # swallow warnings
warnings.simplefilter('ignore')
exec(imp_line, scope, scope)
return scope
def _get_short_module_name(module_name, obj_name):
"""Get the shortest possible module name."""
if '.' in obj_name:
obj_name, attr = obj_name.split('.')
else:
attr = None
scope = {}
try:
# Find out what the real object is supposed to be.
scope = _from_import(module_name, obj_name)
except Exception: # wrong object
return None
else:
real_obj = scope[obj_name]
if attr is not None and not hasattr(real_obj, attr): # wrong class
return None # wrong object
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
scope = {}
try:
scope = _from_import(short_name, obj_name)
# Ensure shortened object is the same as what we expect.
assert real_obj is scope[obj_name]
except Exception: # libraries can throw all sorts of exceptions...
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
_regex = re.compile(r':(?:'
r'func(?:tion)?|'
r'meth(?:od)?|'
r'attr(?:ibute)?|'
r'obj(?:ect)?|'
r'class):`(\S*)`'
)
def identify_names(script_blocks, global_variables=None, node=''):
"""Build a codeobj summary by identifying and resolving used names."""
if node == '': # mostly convenience for testing functions
c = '\n'.join(txt for kind, txt, _ in script_blocks if kind == 'code')
node = ast.parse(c)
# Get matches from the code (AST)
finder = NameFinder(global_variables)
if node is not None:
finder.visit(node)
names = list(finder.get_mapping())
# Get matches from docstring inspection
text = '\n'.join(txt for kind, txt, _ in script_blocks if kind == 'text')
names.extend((x, x, False) for x in re.findall(_regex, text))
example_code_obj = collections.OrderedDict() # order is important
fill_guess = dict()
for name, full_name, class_like in names:
if name in example_code_obj:
continue # if someone puts it in the docstring and code
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
splitted = full_name.rsplit('.', 1 + class_like)
if len(splitted) == 1:
splitted = ('builtins', splitted[0])
elif len(splitted) == 3: # class-like
assert class_like
splitted = (splitted[0], '.'.join(splitted[1:]))
else:
assert not class_like
module, attribute = splitted
# get shortened module name
module_short = _get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
if module_short is not None:
example_code_obj[name] = cobj
elif name not in fill_guess:
cobj['module_short'] = module
fill_guess[name] = cobj
for key, value in fill_guess.items():
if key not in example_code_obj:
example_code_obj[key] = value
return example_code_obj
THUMBNAIL_TEMPLATE = """
.. raw:: html
<div class="sphx-glr-thumbcontainer" tooltip="{snippet}">
.. only:: html
.. figure:: /{thumbnail}
:ref:`sphx_glr_{ref_name}`
.. raw:: html
</div>
"""
BACKREF_THUMBNAIL_TEMPLATE = THUMBNAIL_TEMPLATE + """
.. only:: not html
* :ref:`sphx_glr_{ref_name}`
"""
def _thumbnail_div(target_dir, src_dir, fname, snippet, is_backref=False,
check=True):
"""Generate RST to place a thumbnail in a gallery."""
thumb, _ = _find_image_ext(
os.path.join(target_dir, 'images', 'thumb',
'sphx_glr_%s_thumb.png' % fname[:-3]))
if check and not os.path.isfile(thumb):
# This means we have done something wrong in creating our thumbnail!
raise RuntimeError('Could not find internal sphinx-gallery thumbnail '
'file:\n%s' % (thumb,))
thumb = os.path.relpath(thumb, src_dir)
full_dir = os.path.relpath(target_dir, src_dir)
# Inside rst files forward slash defines paths
thumb = thumb.replace(os.sep, "/")
ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
return template.format(snippet=escape(snippet),
thumbnail=thumb, ref_name=ref_name)
def _write_backreferences(backrefs, seen_backrefs, gallery_conf,
target_dir, fname, snippet):
"""Write backreference file including a thumbnail list of examples."""
if gallery_conf['backreferences_dir'] is None:
return
for backref in backrefs:
include_path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
seen = backref in seen_backrefs
with codecs.open(include_path, 'a' if seen else 'w',
encoding='utf-8') as ex_file:
if not seen:
heading = 'Examples using ``%s``' % backref
ex_file.write('\n\n' + heading + '\n')
ex_file.write('^' * len(heading) + '\n')
ex_file.write(_thumbnail_div(target_dir, gallery_conf['src_dir'],
fname, snippet, is_backref=True))
seen_backrefs.add(backref)
def _finalize_backreferences(seen_backrefs, gallery_conf):
"""Replace backref files only if necessary."""
logger = sphinx_compatibility.getLogger('sphinx-gallery')
if gallery_conf['backreferences_dir'] is None:
return
for backref in seen_backrefs:
path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
if os.path.isfile(path):
_replace_md5(path)
else:
level = gallery_conf['log_level'].get('backreference_missing',
'warning')
func = getattr(logger, level)
func('Could not find backreferences file: %s' % (path,))
func('The backreferences are likely to be erroneous '
'due to file system case insensitivity.')
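# --- Editor's note: a minimal usage sketch added during editing (not part of
# sphinx-gallery). The blocks below are hand-written (kind, content, lineno)
# tuples mimicking what the gallery parser produces; they are assumptions for
# illustration only.
if __name__ == '__main__':
    _blocks = [
        ('text', 'Uses :func:`os.path.join` to build a path.', 1),
        ('code', 'import os\np = os.path.join("a", "b")\n', 3),
    ]
    for _name, _obj in identify_names(_blocks).items():
        # e.g. 'os.path.join' -> module 'os.path', object name 'join'
        print(_name, _obj['module'], _obj['name'])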
|
bsd-3-clause
| 6,789,566,557,589,119,000
| 34.985663
| 79
| 0.555777
| false
| 4.025662
| false
| false
| false
|
snark/ignorance
|
tests/test_git_walk.py
|
1
|
6883
|
import ignorance
import os
try:
# pathlib is in python stdlib in python 3.5+
from pathlib import Path
except ImportError:
from pathlib2 import Path
import pytest
def test_basic_walk(tmpdir_builder):
path = tmpdir_builder.setup('git/basic_match')
files = []
for r, d, f in ignorance.git.walk(path):
files.extend(f)
assert files == ['.gitignore', 'bam', 'foo', 'ignored', 'zap']
def test_negation(tmpdir_builder):
path = tmpdir_builder.setup('git/negation')
files = []
for r, d, f in ignorance.git.walk(path):
files.extend(f)
assert 'bar' in files
assert 'baz.tmpx' in files
assert 'override.tmp' in files
assert 'quux' in files
assert 'foo.tmp' not in files
assert 'order_counts.tmp' not in files
def test_overrides(tmpdir_builder):
path = tmpdir_builder.setup('git/negation')
pathobj = Path(path)
files = []
overrides = ['*.tmpx', '!foo.tmp', 'override.*']
for r, d, fs in ignorance.git.walk(path, overrides=overrides):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'bar' in files
assert 'baz.tmpx' not in files
assert 'override.tmp' not in files
assert 'zap/baz/quux' in files
assert 'foo.tmp' in files
assert 'order_counts.tmp' not in files
assert 'zap/foo.tmp' in files
# Overrides are rooted to the starting directory.
files = []
overrides = ['!foo.tmp', 'zap/foo.tmp']
for r, d, fs in ignorance.git.walk(path, overrides=overrides):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo.tmp' in files
assert 'zap/foo' not in files
def test_directory_only(tmpdir_builder):
path = tmpdir_builder.setup('git/directory-only')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo/bar' not in files
assert 'foo/baz' not in files
assert 'foo/foo' not in files
assert 'bar/bar' not in files
assert 'bar/baz' not in files
assert 'bar/foo' not in files
assert 'baz/bar' not in files
# foo/ is directory only, so...
assert 'baz/foo' in files
# Unmatched by anything.
assert 'baz/baz' in files
def test_ignore_completely(tmpdir_builder):
path = tmpdir_builder.setup('git/ignore-completely-1')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
# Default ignore is '.git'
assert '.git/foo' not in files
assert 'foo' in files
assert 'bar/baz' in files
assert 'bar/zap' in files
assert 'baz/.git' not in files
# Ignore-completely may be changed in the caller
files = []
for r, d, fs in ignorance.git.walk(path,
ignore_completely=['foo', 'bar/']):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert '.git/foo' not in files
assert '.git/bar' in files
assert 'foo' not in files
assert 'bar/baz' not in files
assert 'bar/zap' not in files
assert 'baz/.git' in files
assert 'zap/foo' not in files
# No negation rules allowed in ignore-completely
with pytest.raises(ValueError) as einfo:
for r, d, fs in ignorance.git.walk(
path, ignore_completely=['foo', 'bar/', '!baz']):
pass
assert str(einfo.value) == 'negation rules are not allowed in the ignore'\
+ ' completely rules'
# Ignore-completely may be disabled in the caller
for r, d, fs in ignorance.git.walk(path, ignore_completely=False):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert '.git/foo' in files
assert '.git/bar' in files
assert 'foo' in files
assert 'bar/baz' in files
assert 'bar/zap' in files
assert 'baz/.git' in files
assert 'zap/foo' in files
path = tmpdir_builder.setup('git/ignore-completely-2')
# Ignore completely is non-overrideable within ignore files
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo' in files
assert '.git/foo' not in files
assert 'bar/baz' in files
assert 'baz/.git' not in files
def test_nesting(tmpdir_builder):
path = tmpdir_builder.setup('git/nesting')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo' not in files
assert 'dir_a/foo' in files
assert 'dir_a/bar' not in files
assert 'dir_a/baz' in files
assert 'dir_b/foo' not in files
assert 'dir_b/bar' not in files
assert 'dir_b/baz' not in files
assert 'dir_b/dir_a/foo' not in files
# Anchoring is relative *to the gitignore file*
assert 'dir_b/dir_a/bar' in files
assert 'dir_b/dir_a/baz' not in files
def test_anchoring(tmpdir_builder):
path = tmpdir_builder.setup('git/anchoring')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
# foo is unanchored
assert not any([f for f in files if 'foo' in f])
# dir_a/bar is anchored to the .gitignore file
assert 'dir_a/bar' not in files
assert 'dir_b/dir_a/bar' in files
# dir_a/baz is not anchored, due to a double-asterisk
assert 'dir_a/baz' not in files
assert 'dir_b/dir_a/baz' not in files
# */zap is anchored to the .gitignore file
assert 'dir_a/zap' not in files
assert 'dir_b/zap' not in files
assert 'dir_c/zap' not in files
assert 'dir_b/dir_a/zap' in files
assert 'dir_c/1/zap' in files
assert 'dir_c/2/1/zap' in files
# Any quux under dir_c should be ignored, due to a double-asterisk
assert 'dir_a/quux' in files
assert 'quux' in files
assert 'dir_c/quux' not in files
assert 'dir_c/1/quux' not in files
assert 'dir_c/2/1/quux' not in files
# Leading slash anchors to the root
assert 'xyzzy' not in files
assert 'dir_a/xyzzy' in files
# Finally, any .eggs file under spam/ should be ignored
assert 'dir_a/spam.eggs' in files
assert 'spam/ham/eggs' in files
assert 'spam/spam/eggs' in files
assert 'spam/ham.eggs' not in files
assert 'spam/ham/ham.eggs' not in files
assert 'spam/ham/spam.eggs' not in files
assert 'spam/spam/ham.eggs' not in files
assert 'spam/spam/spam.eggs' not in files
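# --- Editor's note: hedged usage sketch added during editing (not part of the
# test suite). Outside of pytest, ignorance.git.walk is used like os.walk but
# skips entries matched by .gitignore files; the path below is illustrative.
#
#   for root, dirs, files in ignorance.git.walk('/path/to/some/repo'):
#       for name in files:
#           print(os.path.join(root, name))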
|
isc
| 1,623,514,140,519,473,200
| 34.663212
| 78
| 0.634171
| false
| 3.208858
| true
| false
| false
|
renmengye/resnet
|
resnet/utils/lr_schedule.py
|
1
|
3191
|
"""Learning rate scheduler utilities."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from resnet.utils import logger
log = logger.get()
class FixedLearnRateScheduler(object):
"""Adjusts learning rate according to a fixed schedule."""
def __init__(self, sess, model, base_lr, lr_decay_steps, lr_list=None):
"""
Args:
sess: TensorFlow session object.
model: Model object.
base_lr: Base learning rate.
lr_decay_steps: A list of step number which we perform learning decay.
lr_list: A list of learning rate decay multiplier. By default, all 0.1.
"""
self.model = model
self.sess = sess
self.lr = base_lr
self.lr_list = lr_list
self.lr_decay_steps = lr_decay_steps
self.model.assign_lr(self.sess, self.lr)
def step(self, niter):
"""Adds to counter. Adjusts learning rate if necessary.
Args:
niter: Current number of iterations.
"""
if len(self.lr_decay_steps) > 0:
if (niter + 1) == self.lr_decay_steps[0]:
if self.lr_list is not None:
self.lr = self.lr_list[0]
else:
          self.lr *= 0.1  # Divide by 10 by default.
self.model.assign_lr(self.sess, self.lr)
self.lr_decay_steps.pop(0)
log.warning("LR decay steps {}".format(self.lr_decay_steps))
if self.lr_list is not None:
self.lr_list.pop(0)
elif (niter + 1) > self.lr_decay_steps[0]:
ls = self.lr_decay_steps
while len(ls) > 0 and (niter + 1) > ls[0]:
ls.pop(0)
log.warning("LR decay steps {}".format(self.lr_decay_steps))
if self.lr_list is not None:
self.lr = self.lr_list.pop(0)
else:
self.lr *= 0.1
self.model.assign_lr(self.sess, self.lr)
class ExponentialLearnRateScheduler(object):
"""Adjusts learning rate according to an exponential decay schedule."""
def __init__(self, sess, model, base_lr, offset_steps, total_steps, final_lr,
interval):
"""
Args:
sess: TensorFlow session object.
model: Model object.
base_lr: Base learning rate.
offset_steps: Initial non-decay steps.
total_steps: Total number of steps.
final_lr: Final learning rate by the end of training.
interval: Number of steps in between learning rate updates (staircase).
"""
self.model = model
self.sess = sess
self.lr = base_lr
self.offset_steps = offset_steps
self.total_steps = total_steps
self.time_constant = (total_steps - offset_steps) / np.log(base_lr /
final_lr)
self.final_lr = final_lr
self.interval = interval
self.model.assign_lr(self.sess, self.lr)
def step(self, niter):
"""Adds to counter. Adjusts learning rate if necessary.
Args:
niter: Current number of iterations.
"""
if niter > self.offset_steps:
steps2 = niter - self.offset_steps
if steps2 % self.interval == 0:
        new_lr = self.lr * np.exp(-steps2 / self.time_constant)
self.model.assign_lr(self.sess, new_lr)
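# --- Editor's note: a hedged usage sketch added during editing; the stand-in
# model below is an assumption for illustration and is not part of resnet.
if __name__ == "__main__":
  class _FakeModel(object):
    """Minimal stand-in exposing the assign_lr interface the scheduler expects."""
    def assign_lr(self, sess, lr):
      print("assign_lr called with lr={}".format(lr))
  # The LR is multiplied by 0.1 when (niter + 1) reaches 3 and again at 6.
  sched = FixedLearnRateScheduler(
      sess=None, model=_FakeModel(), base_lr=0.1, lr_decay_steps=[3, 6])
  for niter in range(8):
    sched.step(niter)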
|
mit
| -5,248,840,209,165,210,000
| 32.239583
| 79
| 0.606393
| false
| 3.589426
| false
| false
| false
|
sunjinopensource/threadactive
|
examples/example1/main.py
|
1
|
1330
|
import time
import threading
import threadactive
class BackWorker(threadactive.Agent):
def tick(self):
threadactive.Agent.tick(self)
print("[%s][%d] front" % (threading.current_thread().getName(), time.clock()) )
self.print_in_front2()
self.print_in_back()
time.sleep(1)
@threadactive.backend
def print_in_back(self, *args, **kwargs):
print("[%s][%d] back" % (threading.current_thread().getName(), time.clock()) )
self.print_in_back2()
if time.clock() > 3:
self.back_to_front()
@threadactive.frontend
def back_to_front(self, *args, **kwargs):
print("[%s][%d] back to front" % (threading.current_thread().getName(), time.clock()) )
@threadactive.frontend
def print_in_front2(self, *args, **kwargs):
print("[%s][%d] front2" % (threading.current_thread().getName(), time.clock()) )
@threadactive.backend
def print_in_back2(self, *args, **kwargs):
print("[%s][%d] back2" % (threading.current_thread().getName(), time.clock()) )
def main():
i = 0
bw = BackWorker()
while True:
bw.tick()
# restart backend thread
i += 1
if i > 5:
bw.stop_backend()
bw.start_backend()
i = 0
if __name__ == '__main__':
main()
|
mit
| 4,238,513,690,836,113,000
| 26.729167
| 95
| 0.56391
| false
| 3.463542
| false
| false
| false
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/tools/datetime_strftime.py
|
1
|
1478
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
# Copyright (c) 2002-2007 John D. Hunter; All Rights Reserved
import time
def datetime_strftime(date, fmt):
'''
Allow datetime strftime formatting for years before 1900.
See http://bugs.python.org/issue1777412
'''
if date.year > 1900:
return date.strftime(fmt)
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while True:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
year = date.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = date.timetuple()
string1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(string1, str(year))
string2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(string2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
syear = "%4d" % (date.year,)
for site in sites:
string1 = string1[:site] + syear + string1[site + 4:]
return string1
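# --- Editor's note: a minimal usage sketch added during editing (not part of
# the original file). Plain datetime.strftime() rejects years before 1900 on
# some Python builds; datetime_strftime() maps the year into the 28-year
# calendar repeat cycle around 2000, formats, then substitutes the real year.
if __name__ == '__main__':
    import datetime
    # 18 June 1815 was a Sunday; the helper formats it without raising.
    print(datetime_strftime(datetime.date(1815, 6, 18), '%A, %d %B %Y'))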
|
gpl-3.0
| -6,176,057,413,424,030,000
| 28.56
| 72
| 0.583221
| false
| 3.676617
| false
| false
| false
|
chovanecm/sacredboard
|
sacredboard/app/data/pymongo/metricsdao.py
|
1
|
3216
|
"""
Module responsible for accessing the Metrics data in MongoDB.
Issue: https://github.com/chovanecm/sacredboard/issues/60
"""
from bson import ObjectId
from bson.errors import InvalidId
from sacredboard.app.data import NotFoundError
from .genericdao import GenericDAO
from ..metricsdao import MetricsDAO
class MongoMetricsDAO(MetricsDAO):
"""Implementation of MetricsDAO for MongoDB."""
def __init__(self, generic_dao: GenericDAO):
"""
Create new metrics accessor for MongoDB.
:param generic_dao: A configured generic MongoDB data access object
pointing to an appropriate database.
"""
self.generic_dao = generic_dao
self.metrics_collection_name = "metrics"
"""Name of the MongoDB collection with metrics."""
def get(self, run_id, metric_id):
"""
Read a metric of the given id and run.
The returned object has the following format (timestamps are datetime
objects).
.. code::
{"steps": [0,1,20,40,...],
"timestamps": [timestamp1,timestamp2,timestamp3,...],
"values": [0,1 2,3,4,5,6,...],
"name": "name of the metric",
"metric_id": "metric_id",
"run_id": "run_id"}
:param run_id: ID of the Run that the metric belongs to.
        :param metric_id: The ID of the metric.
:return: The whole metric as specified.
:raise NotFoundError
"""
run_id = self._parse_run_id(run_id)
query = self._build_query(run_id, metric_id)
row = self._read_metric_from_db(metric_id, run_id, query)
metric = self._to_intermediary_object(row)
return metric
def delete(self, run_id):
"""
Delete all metrics belonging to the given run.
:param run_id: ID of the Run that the metric belongs to.
"""
self.generic_dao.delete_record(
self.metrics_collection_name,
{"run_id": self._parse_run_id(run_id)})
def _read_metric_from_db(self, metric_id, run_id, query):
row = self.generic_dao.find_record(self.metrics_collection_name,
query)
if row is None:
raise NotFoundError("Metric %s for run %s not found."
% (metric_id, run_id))
return row
def _parse_run_id(self, run_id):
id = None
try:
id = int(run_id)
except ValueError:
id = run_id
return id
def _build_query(self, run_id, metric_id):
        # Metric ids in MongoDB are always ObjectIds
try:
id = ObjectId(metric_id)
return {"run_id": self._parse_run_id(run_id), "_id": id}
except InvalidId as ex:
raise NotFoundError("Metric Id %s is invalid "
"ObjectId in MongoDB" % metric_id) from ex
def _to_intermediary_object(self, row):
return {
"metric_id": str(row["_id"]),
"run_id": row["run_id"],
"name": row["name"],
"steps": row["steps"],
"timestamps": row["timestamps"],
"values": row["values"],
}
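# --- Editor's note: hedged usage sketch added during editing (not part of
# sacredboard). A GenericDAO configured against a Sacred MongoDB database is
# assumed; the ids below are illustrative placeholders only.
#
#   dao = MongoMetricsDAO(generic_dao)
#   metric = dao.get(run_id="14", metric_id="58dcfc41263e8cc29ade7a25")
#   # metric["steps"], metric["timestamps"] and metric["values"] hold the series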
|
mit
| 2,635,387,023,477,506,000
| 31.16
| 77
| 0.560634
| false
| 3.95086
| false
| false
| false
|
jecki/MetaInductionSim
|
PyPlotter/qtGfx.py
|
3
|
10165
|
#!/usr/bin/python
# qtGfx - Implementation of the Gfx.Driver Interface in a
# qt evnironment
"""Implementes Gfx.Driver using the qt GUI toolkit.
"""
import sys, math
try:
from PyQt5.Qt import Qt
from PyQt5.QtCore import pyqtSignal as SIGNAL
from PyQt5.QtCore import QPoint, QObject
from PyQt5.QtWidgets import QApplication, QLabel
import PyQt5.QtGui as qt
QT3 = False
QT5 = True
except ImportError:
QT5 = False
try:
from PyQt4.Qt import Qt, SIGNAL
from PyQt4.QtCore import QPoint, QObject
import PyQt4.QtGui as qt
from PyQt4.QtGui import QApplication, QLabel
QT3 = False
except ImportError:
import qt
from qt import Qt, SIGNAL, QPoint, QObject, QApplication, QLabel
QT3 = True
try:
import Gfx
except ImportError:
from . import Gfx
driverName = "qtGfx"
########################################################################
#
# class Driver
#
########################################################################
class Driver(Gfx.Driver):
"""A graphics driver for qt4.
For an explanation of the inherited methods see Gfx.py.
"""
def __init__(self, paintDevice):
"""Initialize canvas on the QPaintDevice 'paintDevice'."""
Gfx.Driver.__init__(self)
self.paintDevice = None
self.painter = qt.QPainter()
self.font = qt.QFont("SansSerif", 12, qt.QFont.Normal, False)
self.pen = qt.QPen()
self.pen.setCapStyle(Qt.RoundCap)
self.pen.setJoinStyle(Qt.RoundJoin)
self.brush = qt.QBrush(Qt.SolidPattern)
self.color = (0.0, 0.0, 0.0)
        self.w, self.h = 640, 480
self.changePaintDevice(paintDevice)
self.reset()
self.clear()
def changePaintDevice(self, paintDevice):
"""Use a new QPaintDevice for the following drawing commands."""
oldPaintDevice = self.paintDevice
if oldPaintDevice:
self.painter.end()
self.paintDevice = paintDevice
self.painter.begin(self.paintDevice)
self.resizedGfx()
self.painter.setPen(self.pen)
self.painter.setBrush(Qt.NoBrush)
self.painter.setBackgroundMode(Qt.TransparentMode)
if QT3:
self.painter.setBackgroundColor(qt.QColor(255,255,255))
else:
backgroundBrush = qt.QBrush(qt.QColor(255,255,255), Qt.SolidPattern)
self.painter.setBackground(backgroundBrush)
self.painter.setFont(self.font)
return oldPaintDevice
def getPaintDevice(self):
"""-> QPaintDevice of this graphics drivers object"""
return self.paintDevice
def _qtEnd(self):
"""Calls end() method of the QPainter obejct. Before any
drawing can be done again qtBegin() must be called."""
self.painter.end()
def _qtBegin(self):
"""Calls begin() method of the QPainter obejct."""
self.painter.begin()
def resizedGfx(self):
self.w, self.h = self.paintDevice.width(), self.paintDevice.height()
def getSize(self):
return self.w, self.h
def getResolution(self):
return 100
def setColor(self, rgbTuple):
self.color = rgbTuple
qtCol = qt.QColor(int(round(rgbTuple[0]*255)),
int(round(rgbTuple[1]*255)),
int(round(rgbTuple[2]*255)))
self.pen.setColor(qtCol)
self.brush.setColor(qtCol)
self.painter.setPen(self.pen)
def setLineWidth(self, width):
self.lineWidth = width
if width == Gfx.THIN: tn = 1
elif width == Gfx.MEDIUM: tn = 2
elif width == Gfx.THICK: tn = 3
else: raise ValueError("'thickness' must be 'thin', 'medium' or thick' !")
self.pen.setWidth(tn)
self.painter.setPen(self.pen)
def setLinePattern(self, pattern):
self.linePattern = pattern
if pattern == Gfx.CONTINUOUS: lp = Qt.SolidLine
elif pattern == Gfx.DASHED: lp = Qt.DashLine
elif pattern == Gfx.DOTTED: lp = Qt.DotLine
else: raise ValueError("'pattern' must be 'continuous','dashed' " + \
"or 'dotted'")
self.pen.setStyle(lp)
self.painter.setPen(self.pen)
def setFillPattern(self, pattern):
self.fillPattern = pattern
if pattern == Gfx.SOLID: fp = Qt.SolidPattern
elif pattern == Gfx.PATTERN_A: fp = Qt.BDiagPattern
elif pattern == Gfx.PATTERN_B: fp = Qt.FDiagPattern
elif pattern == Gfx.PATTERN_C: fp = Qt.DiagCrossPattern
else: raise ValueError("'pattern' must be 'solid' or 'patternA', " + \
"'patternB', 'patternC' !")
self.brush.setStyle(fp)
def setFont(self, ftype, size, weight):
self.fontType = ftype
self.fontSize = size
self.fontWeight = weight
if ftype == Gfx.SANS: ff = "SansSerif"
elif ftype == Gfx.SERIF: ff = "Serif"
elif ftype == Gfx.FIXED: ff = "Typewriter"
else: raise ValueError("'type' must be 'sans', 'serif' or 'fixed' !")
if size == Gfx.SMALL: fs = 8
elif size == Gfx.NORMAL: fs = 12
elif size == Gfx.LARGE: fs = 16
else: raise ValueError("'size' must be 'small', 'normal' or 'large' !")
fst = False
fw = qt.QFont.Normal
if "i" in weight: fst = True
elif "b" in weight: fw = qt.QFont.Bold
self.font = qt.QFont(ff, fs, fw, fst)
self.painter.setFont(self.font)
def getTextSize(self, text):
fm = self.painter.fontMetrics()
return fm.width(text), fm.height()
# except AttributeError:
# if self.fontSize == Gfx.SMALL: fs = 8
# elif self.fontSize == Gfx.NORMAL: fs = 12
# elif self.fontSize == Gfx.LARGE: fs = 16
# return (len(text) * fs * 2/3, fs) # very inexact
def drawPoint(self, x, y):
self.painter.drawPoint(x, self.h-y-1)
# if self.lineWidth == Gfx.THIN:
# self.dc.DrawPoint(x, self.h-y-1)
# else:
# self.dc.DrawLine(x, self.h-y-1, x, self.h-y-1)
def drawLine(self, x1, y1, x2, y2):
self.painter.drawLine(x1, self.h-y1-1, x2, self.h-y2-1)
def drawRect(self, x, y, w, h):
self.painter.drawRect(x, self.h-y-h, w-1 ,h-1)
def drawPoly(self, array):
if array:
points = [QPoint(p[0],self.h-p[1]-1) for p in array]
if QT3:
pointArray = qt.QPointArray(len(points))
for i in range(len(points)):
pointArray.setPoint(i, points[i])
self.painter.drawPolygon(pointArray)
else:
self.painter.drawPolyline(qt.QPolygon(points))
def drawCircle(self, x, y, r):
self.painter.drawEllipse(x-r, self.h-y-1-r, 2*r, 2*r)
def fillRect(self, x, y, w, h):
self.painter.fillRect(x, self.h-y-h, w, h, self.brush)
def fillPoly(self, array):
if array:
points = [QPoint(p[0],self.h-p[1]-1) for p in array]
self.painter.setBrush(self.brush); self.painter.setPen(Qt.NoPen)
if QT3:
pointArray = qt.QPointArray(len(points))
for i in range(len(points)):
pointArray.setPoint(i, points[i])
self.painter.drawPolygon(pointArray)
else:
self.painter.drawPolygon(qt.QPolygon(points))
self.painter.setPen(self.pen); self.painter.setBrush(Qt.NoBrush)
def fillCircle(self, x, y, r):
self.painter.setBrush(self.brush); self.painter.setPen(Qt.NoPen)
self.painter.drawEllipse(x-r, self.h-y-1-r, 2*r, 2*r)
self.painter.setPen(self.pen); self.painter.setBrush(Qt.NoBrush)
def writeStr(self, x, y, strg, rotationAngle=0.0):
h = self.getTextSize(strg)[1]
if rotationAngle == 0.0:
self.painter.drawText(x, self.h-y-h/4, strg)
else:
rotationAngle = 360.0-rotationAngle
cx = x
cy = self.h-y
self.painter.translate(cx, cy)
self.painter.rotate(rotationAngle)
self.painter.translate(-cx, -cy)
self.painter.drawText(x, self.h-y-h/4, strg)
if QT3:
self.painter.resetXForm()
else:
self.painter.resetTransform()
########################################################################
#
# class Window
#
########################################################################
class Window(Driver, Gfx.Window):
def __init__(self, size=(640, 480), title="qt.Graph", app=None):
Gfx.Window.__init__(self, size, title)
if app != None:
self.app = app
else:
self.app = QApplication(sys.argv)
self.pixmap = qt.QPixmap(size[0], size[1])
self.pixmap.fill(qt.QColor(255,255,255))
self.win = QLabel("", None)
self.win.setPixmap(self.pixmap)
self.win.show()
#self.win.setMinimumSize(size[0], size[1])
#self.win.setMaximum(size[0], size[1])
self.win.resize(size[0], size[1])
if QT5:
#self.lastClosedSignal = SIGNAL("lastWindowClosed()")
self.app.lastWindowClosed.connect(self._qtEnd)
else:
QObject.connect(self.app, SIGNAL("lastWindowClosed()"), self._qtEnd)
Driver.__init__(self, self.pixmap)
def refresh(self):
self.win.setPixmap(self.pixmap)
self.win.update()
def quit(self):
self._qtEnd()
self.win.close()
self.win = None
self.app.quit()
def waitUntilClosed(self):
self.refresh()
if QT3:
self.app.exec_loop()
else:
self.app.exec_()
########################################################################
#
# Test
#
########################################################################
if __name__ == "__main__":
import systemTest
systemTest.Test_qtGfx()
|
mit
| 7,680,707,670,072,934,000
| 32.883333
| 82
| 0.546188
| false
| 3.538113
| false
| false
| false
|
AlanProject/day08
|
MyFTP_Client/modules/main.py
|
1
|
5250
|
#-*- coding:utf-8 -*-
#/usr/bin/env python
import sys,os
import pickle
import socket
class ClientArgv(object):
def __init__(self,argvs):
self.argvs = argvs
self.argvs_parser()
self.handle()
def handle(self):
self.connect()
        # Receive and print the server's welcome message
server_data = self.client_socket.recv(1024)
print server_data
if self.auther():
self.comm_argv()
    # Parse the command line arguments
def argvs_parser(self):
argv_list = ['-s','-p']
if len(self.argvs) < 5:
self.help()
sys.exit()
for i in argv_list:
if i not in self.argvs:
sys.exit('Argv is not found please try again !!!')
try:
self.host = self.argvs[self.argvs.index('-s')+1]
self.port = int(self.argvs[self.argvs.index('-p')+1])
except (ValueError,IndexError) as e:
self.help()
sys.exit()
    # Define the help message
def help(self):
print '''
MyFTP Client command argv
-s :Server Host Address IP or Domain
-p :Server Port
'''
def comm_help(self):
print '''
get [file] :Download file
put [file] :Upload file
cd [path] :change dir path
rm [path] :delete file
exit :exit Ftp system
'''
    # Connect to the server socket
def connect(self):
try:
self.client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.client_socket.connect((self.host,self.port))
except socket.error as e:
sys.exit('connect server filed')
    # User authentication module
def auther(self):
auther_count = 0
while auther_count < 3:
user_name = raw_input('Please input username:')
if len(user_name) == 0:continue
user_pass = raw_input('Please input passwd:')
if len(user_pass) == 0:continue
data = pickle.dumps({'user_name':user_name,'user_pass':user_pass})
self.client_socket.send(data)
server_data = self.client_socket.recv(1024)
if server_data == '200':
return True
else:
                print '%s user name or password error, please try again' % server_data
auther_count += 1
else:
sys.exit('User or Passwd too many mistakes')
    # Command dispatching
def comm_argv(self):
while True:
self.command = raw_input('>>>')
if len(self.command.split()) == 0:continue
if hasattr(self,self.command.split()[0]):
func = getattr(self,self.command.split()[0])
func()
else:
self.comm_help()
    # Download a file
def get(self):
comm_list = self.command.split()
if len(comm_list) < 2:
self.comm_help()
sys.exit()
self.client_socket.send(self.command)
status_coding = self.client_socket.recv(1024)
if status_coding == '203':
print 'file is not found'
else:
self.client_socket.send('start')
file_size = int(self.client_socket.recv(1024))
self.client_socket.send('ok')
file_data = 0
with open(comm_list[1],'wb') as file_write:
while file_data != file_size:
data = self.client_socket.recv(2048)
file_write.write(data)
file_data += len(data)
print '%s Transfer ok'%comm_list[1]
self.client_socket.send('ok')
    # Upload a file
def put(self):
comm_list = self.command.split()
if len(comm_list) < 2:
self.comm_help()
sys.exit()
        # Send the command
self.client_socket.send(self.command)
        # Receive the server's acknowledgement of the command
self.client_socket.recv(1024)
if not os.path.isfile(comm_list[1]):
print 'File is not found'
else:
file_size = str(os.path.getsize(comm_list[1]))
self.client_socket.send(file_size)
self.client_socket.recv(100)
file_data = 0
with open(comm_list[1],'rb') as file_read:
while file_data != int(file_size):
data = file_read.read(2048)
file_data += len(data)
self.client_socket.sendall(data)
self.client_socket.recv(1024)
    # List the directory contents
def ls(self):
self.client_socket.send(self.command)
file_number = int(self.client_socket.recv(1024))
self.client_socket.send('OK')
for i in range(file_number):
self.client_socket.send('ok')
file_name = self.client_socket.recv(1024)
print file_name
def rm(self):
self.client_socket.send(self.command)
rm_data = self.client_socket.recv(1024)
print rm_data
    # Change the working directory
def cd(self):
comm_list = self.command.split()
if len(comm_list) < 2:
self.comm_help()
sys.exit()
    # Exit the FTP client
def exit(self):
sys.exit('Exiting')
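# --- Editor's note: hedged usage sketch added during editing (not part of the
# original client). ClientArgv expects an argv-style list; the host and port
# below are illustrative assumptions only.
#
#   ClientArgv(['myftp', '-s', '127.0.0.1', '-p', '9999'])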
|
apache-2.0
| 6,361,247,776,647,331,000
| 33
| 82
| 0.516471
| false
| 3.59408
| false
| false
| false
|
ctu-yfsg/2015-a-grass-reclass
|
reclassify/Layout/PreviewPanel.py
|
1
|
2207
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Subclass of wx.Panel"""
#-----------------------------------------------------------------------------------------
#Import
try:
#wxPython
import wx
#python std library
import sys
#GRASS modules and packages
from modules.colorrules import BufferedWindow
from core.render import Map
#our modules and packages
except ImportError as err:
print(u"ImportError: {}".format(err))
sys.exit("-1")
#-----------------------------------------------------------------------------------------
class PreviewPanel(wx.Panel):
"""
Subclass of wx.Panel.
Represents center part of the window.
Contains preview of the reclassified mapset.
"""
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
#PREVIEW
self.__buildPreviewPanel()
#LAYOUT
self.__layout()
#self.previewPanel.Hide() #hidden when no preview active
#-----------------------------------------------------------------------------------------
def __buildPreviewPanel(self):
"""
Creates preview panel.
:return: void
"""
#Output preview
self.map = Map()
self.width = self.map.width = 400
self.height = self.map.height = 300
self.map.geom = self.width, self.height
self.preview = BufferedWindow(parent=self,
id=wx.NewId(),
size = (400, 300),
Map=self.map)
self.preview.EraseMap()
#-----------------------------------------------------------------------------------------
def __layout(self):
"""
Specifies final layout for PreviewPanel.
:return: void
"""
sBox = wx.StaticBox(self, wx.NewId(), "Preview")
vBox = wx.StaticBoxSizer(sBox, wx.VERTICAL)
vBox.AddStretchSpacer()
vBox.Add(self.preview, 0, wx.CENTER)
vBox.AddStretchSpacer()
self.SetSizer(vBox)
#-----------------------------------------------------------------------------------------
if __name__ == "__main__":
pass
|
gpl-2.0
| 7,031,197,689,677,351,000
| 27.675325
| 94
| 0.434073
| false
| 4.89357
| false
| false
| false
|
fabricematrat/py-macaroon-bakery
|
macaroonbakery/identity.py
|
1
|
4152
|
# Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
import abc
import macaroonbakery as bakery
class Identity(object):
''' Holds identity information declared in a first party caveat added when
discharging a third party caveat.
'''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def id(self):
''' Returns the id of the user.
May be an opaque blob with no human meaning. An id is only considered
        to be unique within a given domain.
:return string
'''
raise NotImplementedError('id method must be defined in subclass')
@abc.abstractmethod
def domain(self):
'''Return the domain of the user.
This will be empty if the user was authenticated
directly with the identity provider.
:return string
'''
raise NotImplementedError('domain method must be defined in subclass')
class ACLIdentity(Identity):
''' ACLIdentity may be implemented by Identity implementations
to report group membership information.
'''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def allow(self, ctx, acls):
''' reports whether the user should be allowed to access
any of the users or groups in the given acl list.
:param ctx(AuthContext) is the context of the authorization request.
:param acls array of string acl
:return boolean
'''
raise NotImplementedError('allow method must be defined in subclass')
class SimpleIdentity(ACLIdentity):
''' A simple form of identity where the user is represented by a string.
'''
def __init__(self, user):
self._identity = user
def domain(self):
''' A simple identity has no domain.
'''
return ''
def id(self):
'''Return the user name as the id.
'''
return self._identity
def allow(self, ctx, acls):
        '''Allow access to any ACL member that is equal to the user name.
That is, some user u is considered a member of group u and no other.
'''
for acl in acls:
if self._identity == acl:
return True
return False
class IdentityClient(object):
''' Represents an abstract identity manager. User identities can be based
    on local information (for example HTTP basic auth) or by reference to an
external trusted third party (an identity manager).
'''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def identity_from_context(self, ctx):
''' Returns the identity based on information in the context.
If it cannot determine the identity based on the context, then it
should return a set of caveats containing a third party caveat that,
when discharged, can be used to obtain the identity with
declared_identity.
It should only raise an error if it cannot check the identity
(for example because of a database access error) - it's
OK to return all zero values when there's
no identity found and no third party to address caveats to.
@param ctx an AuthContext
:return: an Identity and array of caveats
'''
raise NotImplementedError('identity_from_context method must be '
'defined in subclass')
@abc.abstractmethod
def declared_identity(self, ctx, declared):
'''Parses the identity declaration from the given declared attributes.
TODO take the set of first party caveat conditions instead?
@param ctx (AuthContext)
@param declared (dict of string/string)
:return: an Identity
'''
raise NotImplementedError('declared_identity method must be '
'defined in subclass')
class NoIdentities(IdentityClient):
''' Defines the null identity provider - it never returns any identities.
'''
def identity_from_context(self, ctx):
return None, None
def declared_identity(self, ctx, declared):
raise bakery.IdentityError('no identity declared or possible')
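# --- Editor's note: a minimal usage sketch added during editing (not part of
# the original module). SimpleIdentity treats the user name itself as the only
# "group" the user belongs to, so allow() is plain membership of that name in
# the ACL.
if __name__ == '__main__':
    alice = SimpleIdentity('alice')
    assert alice.id() == 'alice' and alice.domain() == ''
    assert alice.allow(None, ['bob', 'alice'])        # 'alice' is listed in the ACL
    assert not alice.allow(None, ['bob', 'admins'])   # no group expansion happens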
|
lgpl-3.0
| -1,514,180,123,860,796,000
| 31.952381
| 78
| 0.646435
| false
| 4.788927
| false
| false
| false
|
saaros/pghoard
|
pghoard/receivexlog.py
|
1
|
2656
|
"""
pghoard - pg_receivexlog handler
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
import datetime
import logging
import select
import subprocess
import time
from .common import set_subprocess_stdout_and_stderr_nonblocking, terminate_subprocess
from .pgutil import get_connection_info
from threading import Thread
class PGReceiveXLog(Thread):
def __init__(self, config, connection_string, xlog_location, slot, pg_version_server):
super().__init__()
self.log = logging.getLogger("PGReceiveXLog")
self.config = config
self.connection_string = connection_string
self.xlog_location = xlog_location
self.slot = slot
self.pg_version_server = pg_version_server
self.pid = None
self.running = False
self.latest_activity = datetime.datetime.utcnow()
self.log.debug("Initialized PGReceiveXLog")
def run(self):
self.running = True
command = [
self.config["pg_receivexlog_path"],
"--status-interval", "1",
"--verbose",
"--directory", self.xlog_location,
]
if self.pg_version_server < 90300:
conn_info = get_connection_info(self.connection_string)
if "user" in conn_info:
command.extend(["--user", conn_info["user"]])
if "port" in conn_info:
command.extend(["--port", conn_info["port"]])
if "host" in conn_info:
command.extend(["--host", conn_info["host"]])
else:
command.extend(["--dbname", self.connection_string])
if self.pg_version_server >= 90400 and self.slot:
command.extend(["--slot", self.slot])
self.log.debug("Starting to run: %r", command)
start_time = time.time()
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
set_subprocess_stdout_and_stderr_nonblocking(proc)
self.pid = proc.pid
self.log.info("Started: %r, running as PID: %r", command, self.pid)
while self.running:
rlist, _, _ = select.select([proc.stdout, proc.stderr], [], [], 1.0)
for fd in rlist:
content = fd.read()
if content:
self.log.debug(content)
self.latest_activity = datetime.datetime.utcnow()
if proc.poll() is not None:
break
rc = terminate_subprocess(proc, log=self.log)
self.log.debug("Ran: %r, took: %.3fs to run, returncode: %r",
command, time.time() - start_time, rc)
self.running = False
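# --- Editor's note: hedged usage sketch added during editing (not part of
# pghoard). The thread wraps pg_receivexlog in a subprocess; the paths,
# connection string and version number below are illustrative assumptions.
#
#   receiver = PGReceiveXLog(
#       config={"pg_receivexlog_path": "/usr/bin/pg_receivexlog"},
#       connection_string="host=127.0.0.1 port=5432 user=replicator",
#       xlog_location="/var/lib/pghoard/xlog_incoming",
#       slot="pghoard", pg_version_server=90500)
#   receiver.start()   # runs until receiver.running is set to False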
|
apache-2.0
| -5,884,344,457,204,641,000
| 34.891892
| 90
| 0.583961
| false
| 3.952381
| false
| false
| false
|
tinloaf/home-assistant
|
tests/helpers/test_template.py
|
1
|
38585
|
"""Test Home Assistant template helper methods."""
import asyncio
from datetime import datetime
import unittest
import random
import math
from unittest.mock import patch
from homeassistant.components import group
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
from homeassistant.util.unit_system import UnitSystem
from homeassistant.const import (
LENGTH_METERS,
TEMP_CELSIUS,
MASS_GRAMS,
VOLUME_LITERS,
MATCH_ALL,
)
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
import pytest
class TestHelpersTemplate(unittest.TestCase):
"""Test the Template."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up the tests."""
self.hass = get_test_home_assistant()
self.hass.config.units = UnitSystem('custom', TEMP_CELSIUS,
LENGTH_METERS, VOLUME_LITERS,
MASS_GRAMS)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_referring_states_by_entity_id(self):
"""Test referring states by entity id."""
self.hass.states.set('test.object', 'happy')
assert 'happy' == \
template.Template(
'{{ states.test.object.state }}', self.hass).render()
def test_iterating_all_states(self):
"""Test iterating all states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.temperature', 10)
assert '10happy' == \
template.Template(
'{% for state in states %}{{ state.state }}{% endfor %}',
self.hass).render()
def test_iterating_domain_states(self):
"""Test iterating domain states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.back_door', 'open')
self.hass.states.set('sensor.temperature', 10)
assert 'open10' == \
template.Template("""
{% for state in states.sensor %}{{ state.state }}{% endfor %}
""", self.hass).render()
def test_float(self):
"""Test float."""
self.hass.states.set('sensor.temperature', '12')
assert '12.0' == \
template.Template(
'{{ float(states.sensor.temperature.state) }}',
self.hass).render()
assert 'True' == \
template.Template(
'{{ float(states.sensor.temperature.state) > 11 }}',
self.hass).render()
def test_rounding_value(self):
"""Test rounding value."""
self.hass.states.set('sensor.temperature', 12.78)
assert '12.8' == \
template.Template(
'{{ states.sensor.temperature.state | round(1) }}',
self.hass).render()
assert '128' == \
template.Template(
'{{ states.sensor.temperature.state | multiply(10) | round }}',
self.hass).render()
def test_rounding_value_get_original_value_on_error(self):
"""Test rounding value get original value on error."""
assert 'None' == \
template.Template('{{ None | round }}', self.hass).render()
assert 'no_number' == \
template.Template(
'{{ "no_number" | round }}', self.hass).render()
def test_multiply(self):
"""Test multiply."""
tests = {
None: 'None',
10: '100',
'"abcd"': 'abcd'
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | multiply(10) | round }}' % inp,
self.hass).render()
def test_logarithm(self):
"""Test logarithm."""
tests = [
(4, 2, '2.0'),
(1000, 10, '3.0'),
(math.e, '', '1.0'),
('"invalid"', '_', 'invalid'),
(10, '"invalid"', '10.0'),
]
for value, base, expected in tests:
assert expected == \
template.Template(
'{{ %s | log(%s) | round(1) }}' % (value, base),
self.hass).render()
assert expected == \
template.Template(
'{{ log(%s, %s) | round(1) }}' % (value, base),
self.hass).render()
def test_sine(self):
"""Test sine."""
tests = [
(0, '0.0'),
(math.pi / 2, '1.0'),
(math.pi, '0.0'),
(math.pi * 1.5, '-1.0'),
(math.pi / 10, '0.309')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | sin | round(3) }}' % value,
self.hass).render()
def test_cos(self):
"""Test cosine."""
tests = [
(0, '1.0'),
(math.pi / 2, '0.0'),
(math.pi, '-1.0'),
(math.pi * 1.5, '-0.0'),
(math.pi / 10, '0.951')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | cos | round(3) }}' % value,
self.hass).render()
def test_tan(self):
"""Test tangent."""
tests = [
(0, '0.0'),
(math.pi, '-0.0'),
(math.pi / 180 * 45, '1.0'),
(math.pi / 180 * 90, '1.633123935319537e+16'),
(math.pi / 180 * 135, '-1.0')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | tan | round(3) }}' % value,
self.hass).render()
def test_sqrt(self):
"""Test square root."""
tests = [
(0, '0.0'),
(1, '1.0'),
(2, '1.414'),
(10, '3.162'),
(100, '10.0'),
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | sqrt | round(3) }}' % value,
self.hass).render()
def test_strptime(self):
"""Test the parse timestamp method."""
tests = [
('2016-10-19 15:22:05.588122 UTC',
'%Y-%m-%d %H:%M:%S.%f %Z', None),
('2016-10-19 15:22:05.588122+0100',
'%Y-%m-%d %H:%M:%S.%f%z', None),
('2016-10-19 15:22:05.588122',
'%Y-%m-%d %H:%M:%S.%f', None),
('2016-10-19', '%Y-%m-%d', None),
('2016', '%Y', None),
('15:22:05', '%H:%M:%S', None),
('1469119144', '%Y', '1469119144'),
('invalid', '%Y', 'invalid')
]
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = '{{ strptime(\'%s\', \'%s\') }}' % (inp, fmt)
assert str(expected) == \
template.Template(temp, self.hass).render()
def test_timestamp_custom(self):
"""Test the timestamps to custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, 'None'),
(1469119144, None, True, '2016-07-21 16:39:04'),
(1469119144, '%Y', True, '2016'),
(1469119144, 'invalid', True, 'invalid'),
(dt_util.as_timestamp(now), None, False,
now.strftime('%Y-%m-%d %H:%M:%S'))
]
for inp, fmt, local, out in tests:
            if fmt and local:
                fil = 'timestamp_custom(\'{0}\', {1})'.format(fmt, local)
            elif fmt:
                fil = 'timestamp_custom(\'{}\')'.format(fmt)
else:
fil = 'timestamp_custom'
assert out == template.Template(
'{{ %s | %s }}' % (inp, fil), self.hass).render()
def test_timestamp_local(self):
"""Test the timestamps to local filter."""
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | timestamp_local }}' % inp,
self.hass).render()
def test_min(self):
"""Test the min filter."""
assert '1' == \
template.Template('{{ [1, 2, 3] | min }}',
self.hass).render()
def test_max(self):
"""Test the max filter."""
assert '3' == \
template.Template('{{ [1, 2, 3] | max }}',
self.hass).render()
def test_base64_encode(self):
"""Test the base64_encode filter."""
self.assertEqual(
'aG9tZWFzc2lzdGFudA==',
template.Template('{{ "homeassistant" | base64_encode }}',
self.hass).render())
def test_base64_decode(self):
"""Test the base64_decode filter."""
self.assertEqual(
'homeassistant',
template.Template('{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}',
self.hass).render())
def test_ordinal(self):
"""Test the ordinal filter."""
tests = [
(1, '1st'),
(2, '2nd'),
(3, '3rd'),
(4, '4th'),
(5, '5th'),
]
for value, expected in tests:
self.assertEqual(
expected,
template.Template(
'{{ %s | ordinal }}' % value,
self.hass).render())
def test_timestamp_utc(self):
"""Test the timestamps to local filter."""
now = dt_util.utcnow()
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
dt_util.as_timestamp(now):
now.strftime('%Y-%m-%d %H:%M:%S')
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | timestamp_utc }}' % inp,
self.hass).render()
def test_as_timestamp(self):
"""Test the as_timestamp function."""
assert "None" == \
template.Template(
'{{ as_timestamp("invalid") }}', self.hass).render()
self.hass.mock = None
assert "None" == \
template.Template('{{ as_timestamp(states.mock) }}',
self.hass).render()
tpl = '{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", ' \
'"%Y-%m-%dT%H:%M:%S%z")) }}'
assert "1706951424.0" == \
template.Template(tpl, self.hass).render()
@patch.object(random, 'choice')
def test_random_every_time(self, test_choice):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template('{{ [1,2] | random }}', self.hass)
test_choice.return_value = 'foo'
assert 'foo' == tpl.render()
test_choice.return_value = 'bar'
assert 'bar' == tpl.render()
def test_passing_vars_as_keywords(self):
"""Test passing variables as keywords."""
assert '127' == \
template.Template('{{ hello }}', self.hass).render(hello=127)
def test_passing_vars_as_vars(self):
"""Test passing variables as variables."""
assert '127' == \
template.Template('{{ hello }}', self.hass).render({'hello': 127})
def test_passing_vars_as_list(self):
"""Test passing variables as list."""
assert "['foo', 'bar']" == \
template.render_complex(template.Template('{{ hello }}',
self.hass), {'hello': ['foo', 'bar']})
def test_passing_vars_as_list_element(self):
"""Test passing variables as list."""
assert 'bar' == \
template.render_complex(template.Template('{{ hello[1] }}',
self.hass),
{'hello': ['foo', 'bar']})
def test_passing_vars_as_dict_element(self):
"""Test passing variables as list."""
assert 'bar' == \
template.render_complex(template.Template('{{ hello.foo }}',
self.hass),
{'hello': {'foo': 'bar'}})
def test_passing_vars_as_dict(self):
"""Test passing variables as list."""
assert "{'foo': 'bar'}" == \
template.render_complex(template.Template('{{ hello }}',
self.hass), {'hello': {'foo': 'bar'}})
def test_render_with_possible_json_value_with_valid_json(self):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template('{{ value_json.hello }}', self.hass)
assert 'world' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_with_invalid_json(self):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template('{{ value_json }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{ I AM NOT JSON }')
def test_render_with_possible_json_value_with_template_error_value(self):
"""Render with possible JSON value with template error value."""
tpl = template.Template('{{ non_existing.variable }}', self.hass)
assert '-' == \
tpl.render_with_possible_json_value('hello', '-')
def test_render_with_possible_json_value_with_missing_json_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.goodbye }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_valid_with_is_defined(self):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template('{{ value_json.hello|is_defined }}', self.hass)
assert 'world' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_undefined_json(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
assert '{"hello": "world"}' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_undefined_json_error_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{"hello": "world"}', '')
def test_raise_exception_on_error(self):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template('{{ invalid_syntax').ensure_valid()
def test_if_state_exists(self):
"""Test if state exists works."""
self.hass.states.set('test.object', 'available')
tpl = template.Template(
'{% if states.test.object %}exists{% else %}not exists{% endif %}',
self.hass)
assert 'exists' == tpl.render()
def test_is_state(self):
"""Test is_state method."""
self.hass.states.set('test.object', 'available')
tpl = template.Template("""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ is_state("test.noobject", "available") }}
""", self.hass)
assert 'False' == tpl.render()
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.hass.states.set('test.object', 'available', {'mode': 'on'})
tpl = template.Template("""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ is_state_attr("test.noobject", "mode", "on") }}
""", self.hass)
assert 'False' == tpl.render()
def test_state_attr(self):
"""Test state_attr method."""
self.hass.states.set('test.object', 'available', {'mode': 'on'})
tpl = template.Template("""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ state_attr("test.noobject", "mode") == None }}
""", self.hass)
assert 'True' == tpl.render()
def test_states_function(self):
"""Test using states as a function."""
self.hass.states.set('test.object', 'available')
tpl = template.Template('{{ states("test.object") }}', self.hass)
assert 'available' == tpl.render()
tpl2 = template.Template('{{ states("test.object2") }}', self.hass)
assert 'unknown' == tpl2.render()
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_now(self, mock_is_safe):
"""Test now method."""
now = dt_util.now()
with patch.dict(template.ENV.globals, {'now': lambda: now}):
assert now.isoformat() == \
template.Template('{{ now().isoformat() }}',
self.hass).render()
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_utcnow(self, mock_is_safe):
"""Test utcnow method."""
now = dt_util.utcnow()
with patch.dict(template.ENV.globals, {'utcnow': lambda: now}):
assert now.isoformat() == \
template.Template('{{ utcnow().isoformat() }}',
self.hass).render()
def test_regex_match(self):
"""Test regex_match method."""
tpl = template.Template(r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'home assistant test' | regex_match('Home', True) }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'Another home assistant test' | regex_match('home') }}
""", self.hass)
assert 'False' == tpl.render()
def test_regex_search(self):
"""Test regex_search method."""
tpl = template.Template(r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'home assistant test' | regex_search('Home', True) }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'Another home assistant test' | regex_search('home') }}
""", self.hass)
assert 'True' == tpl.render()
def test_regex_replace(self):
"""Test regex_replace method."""
tpl = template.Template(r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""", self.hass)
assert 'World' == tpl.render()
def test_regex_findall_index(self):
"""Test regex_findall_index method."""
tpl = template.Template("""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""", self.hass)
assert 'JFK' == tpl.render()
tpl = template.Template("""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""", self.hass)
assert 'LHR' == tpl.render()
def test_bitwise_and(self):
"""Test bitwise_and method."""
tpl = template.Template("""
{{ 8 | bitwise_and(8) }}
""", self.hass)
assert str(8 & 8) == tpl.render()
tpl = template.Template("""
{{ 10 | bitwise_and(2) }}
""", self.hass)
assert str(10 & 2) == tpl.render()
tpl = template.Template("""
{{ 8 | bitwise_and(2) }}
""", self.hass)
assert str(8 & 2) == tpl.render()
def test_bitwise_or(self):
"""Test bitwise_or method."""
tpl = template.Template("""
{{ 8 | bitwise_or(8) }}
""", self.hass)
assert str(8 | 8) == tpl.render()
tpl = template.Template("""
{{ 10 | bitwise_or(2) }}
""", self.hass)
assert str(10 | 2) == tpl.render()
tpl = template.Template("""
{{ 8 | bitwise_or(2) }}
""", self.hass)
assert str(8 | 2) == tpl.render()
def test_distance_function_with_1_state(self):
"""Test distance function with 1 state."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
tpl = template.Template('{{ distance(states.test.object) | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_2_states(self):
"""Test distance function with 2 states."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance(states.test.object, states.test.object_2) | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_1_coord(self):
"""Test distance function with 1 coord."""
tpl = template.Template(
'{{ distance("32.87336", "-117.22943") | round }}', self.hass)
assert '187' == \
tpl.render()
def test_distance_function_with_2_coords(self):
"""Test distance function with 2 coords."""
assert '187' == \
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (self.hass.config.latitude, self.hass.config.longitude),
self.hass).render()
def test_distance_function_with_1_state_1_coord(self):
"""Test distance function with 1 state 1 coord."""
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) '
'| round }}', self.hass)
assert '187' == tpl.render()
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") '
'| round }}', self.hass)
assert '187' == tpl2.render()
def test_distance_function_return_None_if_invalid_state(self):
"""Test distance function return None if invalid state."""
self.hass.states.set('test.object_2', 'happy', {
'latitude': 10,
})
tpl = template.Template('{{ distance(states.test.object_2) | round }}',
self.hass)
assert 'None' == \
tpl.render()
def test_distance_function_return_None_if_invalid_coord(self):
"""Test distance function return None if invalid coord."""
assert 'None' == \
template.Template(
'{{ distance("123", "abc") }}', self.hass).render()
assert 'None' == \
template.Template('{{ distance("123") }}', self.hass).render()
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template('{{ distance("123", states.test_object_2) }}',
self.hass)
assert 'None' == \
tpl.render()
def test_distance_function_with_2_entity_ids(self):
"""Test distance function with 2 entity ids."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_1_entity_1_coord(self):
"""Test distance function with 1 entity_id and 1 coord."""
self.hass.states.set('test.object', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}',
self.hass)
assert '187' == tpl.render()
def test_closest_function_home_vs_domain(self):
"""Test closest function home vs domain."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_test_domain.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
assert 'test_domain.object' == \
template.Template('{{ closest(states.test_domain).entity_id }}',
self.hass).render()
def test_closest_function_home_vs_all_states(self):
"""Test closest function home vs all states."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain_2.and_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
assert 'test_domain_2.and_closer' == \
template.Template('{{ closest(states).entity_id }}',
self.hass).render()
def test_closest_function_home_vs_group_entity_id(self):
"""Test closest function home vs group entity id."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_in_group.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
group.Group.create_group(
self.hass, 'location group', ['test_domain.object'])
assert 'test_domain.object' == \
template.Template(
'{{ closest("group.location_group").entity_id }}',
self.hass).render()
def test_closest_function_home_vs_group_state(self):
"""Test closest function home vs group state."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_in_group.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
group.Group.create_group(
self.hass, 'location group', ['test_domain.object'])
assert 'test_domain.object' == \
template.Template(
'{{ closest(states.group.location_group).entity_id }}',
self.hass).render()
def test_closest_function_to_coord(self):
"""Test closest function to coord."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (self.hass.config.latitude + 0.3,
self.hass.config.longitude + 0.3), self.hass)
assert 'test_domain.closest_zone' == \
tpl.render()
def test_closest_function_to_entity_id(self):
"""Test closest function to entity id."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
assert 'test_domain.closest_zone' == \
template.Template(
'{{ closest("zone.far_away", '
'states.test_domain).entity_id }}', self.hass).render()
def test_closest_function_to_state(self):
"""Test closest function to state."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
assert 'test_domain.closest_zone' == \
template.Template(
'{{ closest(states.zone.far_away, '
'states.test_domain).entity_id }}', self.hass).render()
def test_closest_function_invalid_state(self):
"""Test closest function invalid state."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
for state in ('states.zone.non_existing', '"zone.non_existing"'):
assert 'None' == \
template.Template('{{ closest(%s, states) }}' % state,
self.hass).render()
def test_closest_function_state_with_invalid_location(self):
"""Test closest function state with invalid location."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': 'invalid latitude',
'longitude': self.hass.config.longitude + 0.1,
})
assert 'None' == \
template.Template(
'{{ closest(states.test_domain.closest_home, '
'states) }}', self.hass).render()
def test_closest_function_invalid_coordinates(self):
"""Test closest function invalid coordinates."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
assert 'None' == \
template.Template('{{ closest("invalid", "coord", states) }}',
self.hass).render()
def test_closest_function_no_location_states(self):
"""Test closest function without location states."""
assert '' == \
template.Template('{{ closest(states).entity_id }}',
self.hass).render()
def test_extract_entities_none_exclude_stuff(self):
"""Test extract entities function with none or exclude stuff."""
assert [] == template.extract_entities(None)
assert [] == template.extract_entities("mdi:water")
assert MATCH_ALL == \
template.extract_entities(
'{{ closest(states.zone.far_away, '
'states.test_domain).entity_id }}')
assert MATCH_ALL == \
template.extract_entities(
'{{ distance("123", states.test_object_2) }}')
def test_extract_entities_no_match_entities(self):
"""Test extract entities function with none entities stuff."""
assert MATCH_ALL == \
template.extract_entities(
"{{ value_json.tst | timestamp_custom('%Y' True) }}")
assert MATCH_ALL == \
template.extract_entities("""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""")
def test_extract_entities_match_entities(self):
"""Test extract entities function with entities stuff."""
assert ['device_tracker.phone_1'] == \
template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
""")
assert ['binary_sensor.garage_door'] == \
template.extract_entities("""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
""")
assert ['binary_sensor.garage_door'] == \
template.extract_entities("""
{{ states("binary_sensor.garage_door") }}
""")
assert ['device_tracker.phone_2'] == \
template.extract_entities("""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
""")
assert sorted([
'device_tracker.phone_1',
'device_tracker.phone_2',
]) == \
sorted(template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
"""))
assert sorted([
'sensor.pick_humidity',
'sensor.pick_temperature',
]) == \
sorted(template.extract_entities("""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
"""))
assert sorted([
'sensor.luftfeuchtigkeit_mean',
'input_number.luftfeuchtigkeit',
]) == \
sorted(template.extract_entities(
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}"
))
def test_extract_entities_with_variables(self):
"""Test extract entities function with variables and entities stuff."""
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state('input_boolean.switch', 'off') }}", {})
assert ['trigger.entity_id'] == \
template.extract_entities(
"{{ is_state(trigger.entity_id, 'off') }}", {})
assert MATCH_ALL == \
template.extract_entities(
"{{ is_state(data, 'off') }}", {})
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state(data, 'off') }}",
{'data': 'input_boolean.switch'})
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state(trigger.entity_id, 'off') }}",
{'trigger': {'entity_id': 'input_boolean.switch'}})
assert MATCH_ALL == \
template.extract_entities(
"{{ is_state('media_player.' ~ where , 'playing') }}",
{'where': 'livingroom'})
def test_jinja_namespace(self):
"""Test Jinja's namespace command can be used."""
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
self.hass
)
self.hass.states.set('sensor.dummy', 'a value')
assert 'a value' == test_template.render()
self.hass.states.set('sensor.dummy', 'another value')
assert 'another value' == test_template.render()
@asyncio.coroutine
def test_state_with_unit(hass):
"""Test the state_with_unit property helper."""
hass.states.async_set('sensor.test', '23', {
'unit_of_measurement': 'beers',
})
hass.states.async_set('sensor.test2', 'wow')
tpl = template.Template(
'{{ states.sensor.test.state_with_unit }}', hass)
assert tpl.async_render() == '23 beers'
tpl = template.Template(
'{{ states.sensor.test2.state_with_unit }}', hass)
assert tpl.async_render() == 'wow'
tpl = template.Template(
'{% for state in states %}{{ state.state_with_unit }} {% endfor %}',
hass)
assert tpl.async_render() == '23 beers wow'
tpl = template.Template('{{ states.sensor.non_existing.state_with_unit }}',
hass)
assert tpl.async_render() == ''
@asyncio.coroutine
def test_length_of_states(hass):
"""Test fetching the length of states."""
hass.states.async_set('sensor.test', '23')
hass.states.async_set('sensor.test2', 'wow')
hass.states.async_set('climate.test2', 'cooling')
tpl = template.Template('{{ states | length }}', hass)
assert tpl.async_render() == '3'
tpl = template.Template('{{ states.sensor | length }}', hass)
assert tpl.async_render() == '2'
|
apache-2.0
| 7,943,343,220,525,600,000
| 35.530303
| 79
| 0.517628
| false
| 3.952459
| true
| false
| false
|
nsoranzo/tools-iuc
|
tools/metaphlan/formatoutput.py
|
1
|
5572
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import re
from pathlib import Path
taxo_level = {
'k': 'kingdom',
'p': 'phylum',
'c': 'class',
'o': 'order',
'f': 'family',
'g': 'genus',
's': 'species',
't': 'strains'}
def split_levels(metaphlan_output_fp, out_dp, legacy_output):
'''
Split default MetaPhlAn into a report for each taxonomic level
    :param metaphlan_output_fp: Path to default MetaPhlAn output
:param out_dp: Path to output directory
:param legacy_output: Boolean for legacy output
'''
# prepare output files
abund_f = {
'k': open(out_dp / Path('kingdom'), 'w'),
'p': open(out_dp / Path('phylum'), 'w'),
'c': open(out_dp / Path('class'), 'w'),
'o': open(out_dp / Path('order'), 'w'),
'f': open(out_dp / Path('family'), 'w'),
'g': open(out_dp / Path('genus'), 'w'),
's': open(out_dp / Path('species'), 'w'),
't': open(out_dp / Path('strains'), 'w')
}
for level in abund_f:
abund_f[level].write("%s\t" % taxo_level[level])
if not legacy_output:
abund_f[level].write("%s_id\t" % taxo_level[level])
abund_f[level].write("abundance\n")
levels_number = len(taxo_level)
with open(metaphlan_output_fp, 'r') as metaphlan_output_f:
with open(out_dp / Path('all'), 'w') as all_level_f:
            # write the header line of the all-levels file
for level in ['k', 'p', 'c', 'o', 'f', 'g', 's', 't']:
all_level_f.write("%s\t" % taxo_level[level])
if not legacy_output:
all_level_f.write("%s_id\t" % taxo_level[level])
all_level_f.write("abundance\n")
# parse metaphlan file
for line in metaphlan_output_f.readlines():
# skip headers
if line.startswith("#"):
continue
                # split the line into its columns
split_line = line[:-1].split('\t')
taxo_n = split_line[0].split('|')
if legacy_output:
abundance = split_line[1]
else:
taxo_id = split_line[1].split('|')
abundance = split_line[2]
# get taxon name and ids
for i in range(len(taxo_n)):
taxo = taxo_n[i].split('__')[1]
taxo = taxo.replace("_", " ")
all_level_f.write("%s\t" % taxo)
if not legacy_output:
all_level_f.write("%s\t" % taxo_id[i])
# if not all taxon levels
for i in range(len(taxo_n), levels_number):
all_level_f.write('\t')
all_level_f.write("%s\n" % abundance)
                # write to the report for the deepest taxonomic level present
last_taxo_level = taxo_n[-1].split('__')
taxo = last_taxo_level[1].replace("_", " ")
level = last_taxo_level[0]
abund_f[level].write("%s\t" % taxo)
if not legacy_output:
abund_f[level].write("%s\t" % taxo_id[-1])
abund_f[level].write("%s\n" % abundance)
# close files
for taxo_level_f in abund_f:
abund_f[taxo_level_f].close()
def format_for_krona(metaphlan_output_fp, krona_out_fp):
'''
    Format default MetaPhlAn output for Krona
    :param metaphlan_output_fp: Path to default MetaPhlAn output
    :param krona_out_fp: Path to output file for Krona
'''
re_replace = re.compile(r"\w__")
re_bar = re.compile(r"\|")
re_underscore = re.compile(r"_")
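    # Only species-level rows are kept; each becomes "<abundance>\t<tab-separated lineage>" for Krona import.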
with open(metaphlan_output_fp, 'r') as metaphlan_output_f:
with open(krona_out_fp, 'w') as krona_out_f:
for line in metaphlan_output_f.readlines():
if "s__" in line:
x = line.rstrip().split('\t')
lineage = re.sub(re_bar, '', x[0])
lineage = re.sub(re_replace, '\t', lineage)
lineage = re.sub(re_underscore, ' ', lineage)
krona_out_f.write("%s\t%s\n" % (x[-1], lineage))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Format MetaPhlAn output')
subparsers = parser.add_subparsers(dest='function')
# split_levels
split_levels_parser = subparsers.add_parser('split_levels', help='Split default MetaPhlAn into a report for each taxonomic level')
split_levels_parser.add_argument('--metaphlan_output', help="Path to default MetaPhlAn output")
split_levels_parser.add_argument('--outdir', help="Path to output directory")
split_levels_parser.add_argument('--legacy-output', dest='legacy_output', action='store_true', help="Old MetaPhlAn2 two columns output")
split_levels_parser.set_defaults(legacy_output=False)
# format_for_krona
    format_for_krona_parser = subparsers.add_parser('format_for_krona', help='Format default MetaPhlAn output for Krona')
format_for_krona_parser.add_argument('--metaphlan_output', help="Path to default MetaPhlAn output")
format_for_krona_parser.add_argument('--krona_output', help="Path to Krona output directory")
args = parser.parse_args()
if args.function == 'split_levels':
split_levels(
Path(args.metaphlan_output),
Path(args.outdir),
args.legacy_output)
elif args.function == 'format_for_krona':
format_for_krona(
Path(args.metaphlan_output),
Path(args.krona_output))
|
mit
| -7,910,039,779,148,934,000
| 37.694444
| 142
| 0.542355
| false
| 3.316667
| false
| false
| false
|
jjcf89/vt_legislation_bot
|
vt_legislation_bot.py
|
1
|
1798
|
#!/usr/bin/python2.7
import argparse
import urllib
from bs4 import BeautifulSoup
#TODO Parse arguments
URL="http://legislature.vermont.gov/bill/status/2016/H.159"
def fetch_url(url):
opener = urllib.FancyURLopener({})
f = opener.open(url)
return f.read()
def fetch_example():
fd = open("Example.html")
return fd.read()
# Get website
#page = fetch_url(URL)
page = fetch_example()
# Feed page into BeautifulSoup parser
soup = BeautifulSoup(page)
# We are going to ignore everything outside of the #main-content div
main_content = soup.select("#main-content")[0]
# Bill number
bill_number = main_content.find("h1").string.strip()
print bill_number
# Bill description contained in the div .charge class
description = main_content.select(".charge")[0].string
print description
print
# Locations and Sponsors are included in the summary-table
summary_table = main_content.select(".summary-table")[0]
# Grab location header and print
location_dt = summary_table.find("dt", text="Location")
print location_dt.string + ":"
# Go over two tags to find location contents
location_dd = location_dt.next_sibling.next_sibling
print location_dd.string
print
# Grab sponsors header and print
sponsors_dt = summary_table.find("dt", text="Sponsor(s)")
print sponsors_dt.string + ":"
# Go over two tags to find sponsors contents
sponsors_dd = sponsors_dt.next_sibling.next_sibling
# Iterate over list of sponsors and print
for li in sponsors_dd.find_all("li"):
print li.string
print
# Detailed status table contains the FULL STATUS information
detailed_status_table = main_content.select("#bill-detailed-status-table")[0]
# FULL STATUS is the fourth column of table
full_status_td = detailed_status_table.find_all("td")[3]
print " ".join(full_status_td.stripped_strings)
print
# Print url
print URL
|
gpl-2.0
| 4,761,416,767,378,805,000
| 25.850746
| 77
| 0.75139
| false
| 3.210714
| false
| false
| false
|
jrichte43/ProjectEuler
|
Problem-0200/solutions.py
|
1
|
1266
|
__problem_title__ = "Find the 200th prime-proof sqube containing the contiguous sub-string \"200\""
__problem_url___ = "https://projecteuler.net/problem=200"
__problem_description__ = "We shall define a sqube to be a number of the form, p^2 * q^3, where p and q are " \
                          "distinct primes. For example, 200 = 5^2 * 2^3 or 120072949 = 23^2 * 61^3. The " \
                          "first five squbes are 72, 108, 200, 392, and 500. Interestingly, 200 " \
                          "is also the first number for which you cannot change any single digit " \
                          "to make a prime; we shall call such numbers, prime-proof. The next " \
                          "prime-proof sqube which contains the contiguous sub-string \"200\" is " \
                          "1992008. Find the 200th prime-proof sqube containing the contiguous " \
                          "sub-string \"200\"."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
gpl-3.0
| -4,679,411,115,905,603,000
| 38.5625
| 100
| 0.575039
| false
| 4.057692
| false
| false
| false
|
DArtagan/teetimer
|
tracker/migrations/0003_auto__del_field_teetime_people.py
|
1
|
5085
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'TeeTime.people'
db.delete_column('tracker_teetime', 'people_id')
# Adding M2M table for field people on 'TeeTime'
m2m_table_name = db.shorten_name('tracker_teetime_people')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('teetime', models.ForeignKey(orm['tracker.teetime'], null=False)),
('user', models.ForeignKey(orm['accounts.user'], null=False))
))
db.create_unique(m2m_table_name, ['teetime_id', 'user_id'])
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'TeeTime.people'
raise RuntimeError("Cannot reverse this migration. 'TeeTime.people' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'TeeTime.people'
db.add_column('tracker_teetime', 'people',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.User']),
keep_default=False)
# Removing M2M table for field people on 'TeeTime'
db.delete_table(db.shorten_name('tracker_teetime_people'))
models = {
'accounts.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'related_name': "'user_set'", 'blank': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'related_name': "'user_set'", 'blank': 'True', 'symmetrical': 'False'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True', 'symmetrical': 'False'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tracker.teetime': {
'Meta': {'object_name': 'TeeTime'},
'date_edited': ('django.db.models.fields.DateField', [], {'blank': 'True', 'auto_now': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'people': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.User']", 'symmetrical': 'False'}),
'slots': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['tracker']
|
mit
| -6,011,046,772,855,287,000
| 60.277108
| 192
| 0.574041
| false
| 3.77786
| false
| false
| false
|
Moonshile/fast12306
|
src/core/token.py
|
1
|
5021
|
#coding=utf-8
import re
from fetch import FetchSlice
class Token(object):
def __init__(self, session, base_url):
self.session = session
self.base_url = base_url
self.init_url_pattern = re.compile(r'<script\s+src="/(otn/dynamicJs/.+)"\s+type="text/javascript"\s+xml:space="preserve">\s*</script>\s+</head>')
self.key_pattern = re.compile(r'function\s+gc\(\)\s*{\s*var\s+key\s*=\s*\'(.+)\'\s*;var\s+value\s*=')
def retrieve_key(self, init_url):
"""
:param init_url: URL which contains the link to the js file that contains the token key
:param base_url: URL base
"""
fs = FetchSlice(self.session)
url = fs.fetch(self.init_url_pattern, init_url)[0]
key = fs.fetch(self.key_pattern, self.base_url + url)[0]
return key
def retrieve_value(self, key):
return self.encode32(self.bin216(self.base32('1111', key)))
"""
The following methods are translated from a javascript file from 12306
"""
def text2array(self, text, include_length):
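        # Pack the string into 32-bit little-endian words; optionally append the byte length, as the JS original does.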
length = len(text)
res = []
for i in range(0, length, 4):
res.append(ord(text[i]) | ord(text[i + 1]) << 8 | ord(text[i + 2]) << 16 | ord(text[i + 3]) << 24)
if include_length:
res.append(length)
return res
def array2text(self, data, include_length):
"""
length = len(data)
n = (length - 1) << 2;
if include_length:
m = data[length - 1]
if m < n - 3 or m > n:
return None
n = m
res = reduce(
lambda res, x: res + x,
map(
lambda x: chr(x & 0xff) + chr(x >> 8 & 0xff) + chr(x >> 16 & 0xff) + chr(x >> 24 & 0xff),
data
),
''
)
if include_length:
return res[:n]
else:
return res
"""
return map(lambda x: ((x&0xff)<<24)|((x>>8&0xff)<<16)|((x>>16&0xff)<<8)|(x>>24&0xff), data)
def base32(self, text, key):
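        # Appears to be an XXTEA-style block cipher ported from the 12306 JS; delta is the golden-ratio-derived constant.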
delta = 0x9E3779B8
def rshift(v, n):
return (v % 0x100000000) >> n
def compute_mx(z, y, s, k, p, e):
r1 = rshift(z, 5)
r2 = y << 2 & 0xffffffff
r3 = r1 ^ r2
r4 = rshift(y, 3)
r5 = z << 4 & 0xffffffff
r6 = r4 ^ r5
r7 = r3 + r6 & 0xffffffff
r8 = s ^ y
r9 = k[p & 3 ^ e] ^ z
r10 = r8 + r9 & 0xffffffff
return r7 ^ r10
if text == '':
return ''
v = self.text2array(text, True)
k = self.text2array(key, False)
if len(k) < 4:
for i in range(0, 4 - len(k)):
k.append(0)
n = len(v) - 1
z = v[n]
y = v[0]
mx = None
e = None
p = None
q = int(6 + 52/(n + 1))
s = 0
while 0 < q:
q = q - 1
s = (s + delta & 0xffffffff)
e = rshift(s, 2) & 3
for p in range(0, n):
y = v[p + 1]
mx = compute_mx(z, y, s, k, p, e)
z = v[p] = (v[p] + mx & 0xffffffff)
p = n
y = v[0]
mx = compute_mx(z, y, s, k, p, e)
z = v[n] = (v[n] + mx & 0xffffffff)
return self.array2text(v, False)
def bin216(self, text):
"""
i = None
o = ''
n = None
text = text + ''
l = len(text)
b = ''
for i in range(0, l):
b = ord(text[i])
n = hex(b).replace('0x', '')
o = o + ('0' + n if len(n) < 2 else n)
return o
"""
return reduce(lambda res, x: res + x,
map(
lambda x: hex(0x100000000 | x)[3:],
text
),
''
)
def encode32(self, text):
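        # Plain Base64 encoding written out longhand to mirror the original JS (keyStr is the Base64 alphabet).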
keyStr = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
output = ''
chr1 = None
chr2 = None
chr3 = ''
enc1 = None
enc2 = None
enc3 = None
enc4 = ''
i = 0
while True:
chr1 = ord(text[i])
i = i + 1
chr2 = ord(text[i]) if i < len(text) else None
i = i + 1
chr3 = ord(text[i]) if i < len(text) else None
i = i + 1
enc1 = chr1 >> 2
enc2 = ((chr1 & 3) << 4) | ((chr2 >> 4) if chr2 else 0)
enc3 = (((chr2 & 15) << 2) if chr2 else 0) | ((chr3 >> 6) if chr3 else 0)
enc4 = chr3 & 63 if chr3 else 0
if chr2 is None:
enc3 = enc4 = 64
elif chr3 is None:
enc4 = 64
output = output + keyStr[enc1] + keyStr[enc2] + keyStr[enc3] + keyStr[enc4]
chr1 = chr2 = chr3 = ''
enc1 = enc2 = enc3 = enc4 = ''
if i >= len(text):
break
return output
|
apache-2.0
| -8,190,554,962,425,597,000
| 29.065868
| 153
| 0.430592
| false
| 3.340652
| false
| false
| false
|
jonathansick/synthsb
|
scripts/segmap_wircamsb.py
|
1
|
5931
|
#!/usr/bin/env python
# encoding: utf-8
"""
Compute WIRCam synthetic surface brightnesses within regions of a segmentation
map.
Accepts segmentation maps and pixel tables made by, e.g. andromass.
2014-11-18 - Created by Jonathan Sick
"""
import argparse
# from collections import defaultdict
# import math
import numpy as np
from astropy import log
import astropy.io.fits as fits
from astropy.wcs import WCS
from astropy.table import Table
# from sqlalchemy import func
from sqlalchemy.orm import aliased
from starplex.database import connect_to_server, Session
from starplex.database import Catalog, Bandpass, CatalogStar, Observation
from androphotsys import wircam_vega_to_ab
# from starplex.utils.timer import Timer
from synthsb.directsb import NoDataError
# from synthsb.directsb import compute_sb
def main():
log.setLevel("INFO")
args = parse_args()
segmap_fits = fits.open(args.seg_path)
segmap = segmap_fits[0].data
wcs = WCS(segmap_fits[0].header)
pixel_table = Table.read(args.pix_table_path,
format='ascii.commented_header')
fluxsum_J = np.full(len(pixel_table), 0, dtype=np.float)
varsum_J = np.full(len(pixel_table), 0, dtype=np.float)
fluxsum_Ks = np.full(len(pixel_table), 0, dtype=np.float)
varsum_Ks = np.full(len(pixel_table), 0, dtype=np.float)
star_count = np.zeros(len(pixel_table), dtype=np.int)
fields = ["M31-{0:d}".format(i) for i in range(1, 28)] + \
["M31-{0:d}".format(i) for i in range(47, 72)]
# fields = ['M31-1']
for field in fields:
print "Processing", field
data = load_photometry(field)
x, y = wcs.wcs_world2pix(data['ra'], data['dec'], 0)
# Round down to pixel indices
x = x.astype(np.int)
y = y.astype(np.int)
        # Keep only stars inside the image footprint with finite, covered photometry
ny, nx = segmap.shape
s = np.where((x >= 0) & (y >= 0) &
(x < nx) & (y < ny) &
np.isfinite(data['J']) & np.isfinite(data['Ks']) &
np.isfinite(data['J_err']) & np.isfinite(data['Ks_err']) &
(data['cfrac'] > 0.))[0]
data = data[s]
n_stars = data.shape[0]
flux_J, flux_var_J = mag_to_mjy(data['J'], data['J_err'])
flux_Ks, flux_var_Ks = mag_to_mjy(data['Ks'], data['Ks_err'])
for i in xrange(n_stars):
bin_id = segmap[y[i], x[i]]
if bin_id >= 0:
# add light to bin
fluxsum_J[bin_id] += flux_J[i] / data['cfrac'][i]
fluxsum_Ks[bin_id] += flux_Ks[i] / data['cfrac'][i]
varsum_J[bin_id] += flux_var_J[i]
varsum_Ks[bin_id] += flux_var_Ks[i]
star_count[bin_id] += 1
empty = np.where(star_count == 0)[0]
fluxsum_J[empty] = np.nan
fluxsum_Ks[empty] = np.nan
varsum_J[empty] = np.nan
varsum_Ks[empty] = np.nan
flux_err_J = np.sqrt(varsum_J)
flux_err_Ks = np.sqrt(varsum_Ks)
pixel_table['n_stars'] = star_count
pixel_table['synth_J'] = fluxsum_J
pixel_table['synth_Ks'] = fluxsum_Ks
pixel_table['synth_J_err'] = flux_err_J
pixel_table['synth_Ks_err'] = flux_err_Ks
pixel_table.write(args.output_path,
format='ascii.commented_header')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('pix_table_path')
parser.add_argument('seg_path')
parser.add_argument('output_path')
return parser.parse_args()
def load_photometry(fieldname,
use_vega=False, apply_intercal=False,
server='marvin'):
"""Load WIRCam photometry from Starplex, converted to AB mag.
Filter out MW stars with a rudimentary J-Ks > 0.9 (Vega) color cut.
"""
instrument = "wircam"
connect_to_server(server)
session = Session()
mag1obs = aliased(Observation)
mag2obs = aliased(Observation)
bp1 = aliased(Bandpass)
bp2 = aliased(Bandpass)
catalog = session.query(Catalog).\
filter(Catalog.name == fieldname).\
filter(Catalog.instrument == instrument).\
one()
q = session.query(CatalogStar.cfrac, CatalogStar.ra, CatalogStar.dec,
mag1obs.mag, mag1obs.mag_err,
mag2obs.mag, mag2obs.mag_err).\
join(mag1obs, CatalogStar.observations).\
join(mag2obs, CatalogStar.observations).\
join(Catalog).\
filter(Catalog.name == fieldname).\
filter(Catalog.instrument == instrument).\
join(bp1, mag1obs.bandpass).\
filter(bp1.name == "J").\
join(bp2, mag2obs.bandpass).\
filter(bp2.name == "Ks")
dt = [('cfrac', np.float), ('ra', np.float), ('dec', np.float),
('J', np.float), ('J_err', np.float),
('Ks', np.float), ('Ks_err', np.float)]
data = np.array(q.all(), dtype=np.dtype(dt))
# Filter out MW stars
# FIXME rudimentary
# Using Vega Mag here!
sel = np.where((data['J'] - data['Ks']) > 0.9)[0]
data = data[sel]
# Apply the intercal ZP correction
if apply_intercal:
if 'intercal' in catalog.meta:
for band in ['J', 'Ks']:
if band in catalog.meta['intercal']:
data[band] += catalog.meta['intercal'][band]['zp']
# Convert to AB
if not use_vega:
data['J'] = wircam_vega_to_ab(data['J'], "J")
data['Ks'] = wircam_vega_to_ab(data['Ks'], "Ks")
log.info("Field {0} {2} has {1:d} stars".
format(fieldname, data.shape[0], instrument))
session.close()
if len(data) == 0:
raise NoDataError
return data
def mag_to_mjy(mag, mag_err):
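    # AB magnitude to flux in microjanskys: 10^23 converts erg/s/cm^2/Hz to Jy,
    # 10^6 converts Jy to microJy, and 10^(-48.6/2.5) is the AB zero point (~3631 Jy).
    # The error divisor approximates 2.5 / ln(10) ~= 1.086.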
MICROJY_ZP = 10. ** 6. * 10. ** 23. * 10. ** (-48.6 / 2.5)
mjy = MICROJY_ZP * np.power(10., -mag / 2.5)
mjy_err = (mjy * mag_err) / 1.0875
return mjy, mjy_err
if __name__ == '__main__':
main()
|
bsd-3-clause
| 7,598,660,081,612,174,000
| 32.320225
| 79
| 0.581521
| false
| 3.08264
| false
| false
| false
|
hjanime/VisTrails
|
vistrails/db/versions/v0_9_3/translate/v0_9_2.py
|
1
|
7218
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
from vistrails.db.versions.v0_9_3.domain import DBVistrail, DBAction, DBTag, DBModule, \
DBConnection, DBPortSpec, DBFunction, DBParameter, DBLocation, DBAdd, \
DBChange, DBDelete, DBAnnotation, DBPort, DBAbstractionRef, DBGroup
def translateVistrail(_vistrail):
vistrail = DBVistrail()
for _action in _vistrail.db_actions:
ops = []
for op in _action.db_operations:
if op.vtType == 'add':
data = convert_data(op.db_data)
ops.append(DBAdd(id=op.db_id,
what=op.db_what,
objectId=op.db_objectId,
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType,
data=data))
elif op.vtType == 'change':
data = convert_data(op.db_data)
ops.append(DBChange(id=op.db_id,
what=op.db_what,
oldObjId=op.db_oldObjId,
newObjId=op.db_newObjId,
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType,
data=data))
elif op.vtType == 'delete':
ops.append(DBDelete(id=op.db_id,
what=op.db_what,
objectId=op.db_objectId,
parentObjId=op.db_parentObjId,
parentObjType=op.db_parentObjType))
annotations = []
for annotation in _action.db_annotations:
annotations.append(DBAnnotation(id=annotation.db_id,
key=annotation.db_key,
value=annotation.db_value))
session = _action.db_session
if not session:
session = None
else:
session = long(_action.db_session)
action = DBAction(id=_action.db_id,
prevId=_action.db_prevId,
date=_action.db_date,
user=_action.db_user,
prune=_action.db_prune,
session=session,
operations=ops,
annotations=annotations)
vistrail.db_add_action(action)
for _tag in _vistrail.db_tags:
tag = DBTag(id=_tag.db_id,
name=_tag.db_name)
vistrail.db_add_tag(tag)
vistrail.db_version = '0.9.3'
return vistrail
def convert_data(child):
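    # Map a 0.9.2 database object to its 0.9.3 counterpart, dispatching on its vtType.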
if child.vtType == 'module':
return DBModule(id=child.db_id,
cache=child.db_cache,
name=child.db_name,
namespace=child.db_namespace,
package=child.db_package,
version=child.db_version,
tag=child.db_tag)
elif child.vtType == 'abstractionRef':
return DBAbstractionRef(id=child.db_id,
name=child.db_name,
cache=child.db_cache,
abstraction_id=child.db_abstraction_id,
version=child.db_version)
elif child.vtType == 'connection':
return DBConnection(id=child.db_id)
elif child.vtType == 'portSpec':
return DBPortSpec(id=child.db_id,
name=child.db_name,
type=child.db_type,
spec=child.db_spec)
elif child.vtType == 'function':
return DBFunction(id=child.db_id,
pos=child.db_pos,
name=child.db_name)
elif child.vtType == 'parameter':
return DBParameter(id=child.db_id,
pos=child.db_pos,
name=child.db_name,
type=child.db_type,
val=child.db_val,
alias=child.db_alias)
elif child.vtType == 'location':
return DBLocation(id=child.db_id,
x=child.db_x,
y=child.db_y)
elif child.vtType == 'annotation':
return DBAnnotation(id=child.db_id,
key=child.db_key,
value=child.db_value)
elif child.vtType == 'port':
return DBPort(id=child.db_id,
type=child.db_type,
moduleId=child.db_moduleId,
moduleName=child.db_moduleName,
name=child.db_name,
spec=child.db_spec)
elif child.vtType == 'group':
return DBGroup(id=child.db_id,
workflow=child.db_workflow,
cache=child.db_cache,
name=child.db_name,
namespace=child.db_namespace,
package=child.db_package,
version=child.db_version,
tag=child.db_tag)
|
bsd-3-clause
| -7,308,370,435,144,763,000
| 44.683544
| 88
| 0.518426
| false
| 4.474892
| false
| false
| false
|
MuckRock/muckrock
|
muckrock/message/tasks.py
|
1
|
11469
|
"""
Tasks for the messages application.
"""
# Django
from celery.exceptions import SoftTimeLimitExceeded
from celery.schedules import crontab
from celery.task import periodic_task, task
from django.contrib.auth.models import User
from django.utils import timezone
# Standard Library
import logging
from random import randint
# Third Party
import stripe
from dateutil.relativedelta import relativedelta
from requests.exceptions import RequestException
# MuckRock
from muckrock.accounts.models import RecurringDonation
from muckrock.core.utils import stripe_retry_on_error
from muckrock.crowdfund.models import RecurringCrowdfundPayment
from muckrock.message import digests, receipts
from muckrock.message.email import TemplateEmail
from muckrock.message.notifications import SlackNotification
logger = logging.getLogger(__name__)
@task(
time_limit=600,
soft_time_limit=570,
name="muckrock.message.tasks.send_activity_digest",
)
def send_activity_digest(user_id, subject, preference):
"""Individual task to create and send an activity digest to a user."""
user = User.objects.get(id=user_id)
interval = {
"hourly": relativedelta(hours=1),
"daily": relativedelta(days=1),
"weekly": relativedelta(weeks=1),
"monthly": relativedelta(months=1),
}[preference]
logger.info(
"Starting activity digest at: %s User: %s Subject: %s Interval: %s",
timezone.now(),
user,
subject,
interval,
)
try:
email = digests.ActivityDigest(user=user, subject=subject, interval=interval)
email.send()
except SoftTimeLimitExceeded:
logger.error(
"Send Activity Digest took too long. " "User: %s, Subject: %s, Interval %s",
user,
subject,
interval,
)
def send_digests(preference, subject):
"""Helper to send out timed digests"""
users = User.objects.filter(
profile__email_pref=preference, notifications__read=False
).distinct()
for user in users:
send_activity_digest.delay(user.pk, subject, preference)
# every hour
@periodic_task(
run_every=crontab(hour="*/1", minute=0), name="muckrock.message.tasks.hourly_digest"
)
def hourly_digest():
"""Send out hourly digest"""
send_digests("hourly", "Hourly Digest")
# every day at 10am
@periodic_task(
run_every=crontab(hour=10, minute=0), name="muckrock.message.tasks.daily_digest"
)
def daily_digest():
"""Send out daily digest"""
send_digests("daily", "Daily Digest")
# every Monday at 10am
@periodic_task(
run_every=crontab(day_of_week=1, hour=10, minute=0),
name="muckrock.message.tasks.weekly_digest",
)
def weekly_digest():
"""Send out weekly digest"""
send_digests("weekly", "Weekly Digest")
# first day of every month at 10am
@periodic_task(
run_every=crontab(day_of_month=1, hour=10, minute=0),
name="muckrock.message.tasks.monthly_digest",
)
def monthly_digest():
"""Send out monthly digest"""
send_digests("monthly", "Monthly Digest")
# every day at 9:30am
@periodic_task(
run_every=crontab(hour=9, minute=30), name="muckrock.message.tasks.staff_digest"
)
def staff_digest():
"""Send out staff digest"""
staff_users = User.objects.filter(is_staff=True)
for staff_user in staff_users:
email = digests.StaffDigest(user=staff_user, subject="Daily Staff Digest")
email.send()
@task(name="muckrock.message.tasks.send_invoice_receipt")
def send_invoice_receipt(invoice_id):
"""Send out a receipt for an invoiced charge"""
invoice = stripe_retry_on_error(stripe.Invoice.retrieve, invoice_id)
try:
charge = stripe_retry_on_error(stripe.Charge.retrieve, invoice.charge)
except stripe.error.InvalidRequestError:
# a free subscription has no charge attached
# maybe send a notification about the renewal
# but for now just handle the error
return
try:
customer = stripe_retry_on_error(stripe.Customer.retrieve, invoice.customer)
charge.metadata["email"] = customer.email
except stripe.error.InvalidRequestError:
logger.error("Could not retrieve customer")
return
plan = get_subscription_type(invoice)
if plan == "donate":
receipt_function = receipts.donation_receipt
elif plan.startswith("crowdfund"):
receipt_function = receipts.crowdfund_payment_receipt
charge.metadata["crowdfund_id"] = plan.split("-")[1]
recurring_payment = RecurringCrowdfundPayment.objects.filter(
subscription_id=invoice.subscription
).first()
if recurring_payment:
recurring_payment.log_payment(charge)
else:
logger.error("No recurring crowdfund payment for: %s", invoice.subscription)
else:
# other types are handled by squarelet
return
receipt = receipt_function(None, charge)
receipt.send(fail_silently=False)
@task(name="muckrock.message.tasks.send_charge_receipt")
def send_charge_receipt(charge_id):
"""Send out a receipt for a charge"""
logger.info("Charge Receipt for %s", charge_id)
charge = stripe_retry_on_error(stripe.Charge.retrieve, charge_id)
# if the charge was generated by an invoice, let the invoice handler send the receipt
if charge.invoice:
return
# we should expect charges to have metadata attached when they are made
try:
user_email = charge.metadata["email"]
user_action = charge.metadata["action"]
except KeyError:
# squarelet charges will not have matching metadata
logger.warning("Malformed charge metadata, no receipt sent: %s", charge)
return
# try getting the user based on the provided email
# we know from Checkout purchases that logged in users have their email autofilled
try:
user = User.objects.get(email=user_email)
except User.DoesNotExist:
user = None
logger.info("Charge Receipt User: %s", user)
try:
receipt_functions = {
"crowdfund-payment": receipts.crowdfund_payment_receipt,
"donation": receipts.donation_receipt,
}
receipt_function = receipt_functions[user_action]
except KeyError:
# squarelet charges will be handled on squarelet
logger.warning("Unrecognized charge: %s", user_action)
receipt_function = receipts.generic_receipt
receipt = receipt_function(user, charge)
receipt.send(fail_silently=False)
def get_subscription_type(invoice):
"""Gets the subscription type from the invoice."""
# get the first line of the invoice
if invoice.lines.total_count > 0:
return invoice.lines.data[0].plan.id
else:
return "unknown"
@task(name="muckrock.message.tasks.failed_payment")
def failed_payment(invoice_id):
"""Notify a customer about a failed subscription invoice."""
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
invoice = stripe_retry_on_error(stripe.Invoice.retrieve, invoice_id)
attempt = invoice.attempt_count
subscription_type = get_subscription_type(invoice)
recurring_donation = None
crowdfund = None
email_to = []
if subscription_type == "donate":
recurring_donation = RecurringDonation.objects.filter(
subscription_id=invoice.subscription
).first()
if recurring_donation:
user = recurring_donation.user
if user is None:
email_to = [recurring_donation.email]
recurring_donation.payment_failed = True
recurring_donation.save()
else:
user = None
logger.error("No recurring crowdfund found for %s", invoice.subscription)
elif subscription_type.startswith("crowdfund"):
recurring_payment = RecurringCrowdfundPayment.objects.filter(
subscription_id=invoice.subscription
).first()
if recurring_payment:
user = recurring_payment.user
if user is None:
email_to = [recurring_payment.email]
crowdfund = recurring_payment.crowdfund
recurring_payment.payment_failed = True
recurring_payment.save()
else:
user = None
logger.error("No recurring crowdfund found for %s", invoice.subscription)
else:
# squarelet handles other types
return
subject = "Your payment has failed"
context = {"attempt": attempt, "type": subscription_type, "crowdfund": crowdfund}
if subscription_type.startswith("crowdfund"):
context["type"] = "crowdfund"
if attempt == 4:
# on last attempt, cancel the user's subscription and lower the failed payment flag
if subscription_type == "donate" and recurring_donation:
recurring_donation.cancel()
elif subscription_type.startswith("crowdfund") and recurring_payment:
recurring_payment.cancel()
logger.info("%s subscription has been cancelled due to failed payment", user)
subject = "Your %s subscription has been cancelled" % subscription_type
context["attempt"] = "final"
else:
logger.info("Failed payment by %s, attempt %s", user, attempt)
notification = TemplateEmail(
user=user,
to=email_to,
extra_context=context,
text_template="message/notification/failed_payment.txt",
html_template="message/notification/failed_payment.html",
subject=subject,
)
notification.send(fail_silently=False)
@task(name="muckrock.message.tasks.support")
def support(user_id, message, task_id):
"""Send a response to a user about a flag task."""
# pylint: disable=import-outside-toplevel
from muckrock.task.models import FlaggedTask
user = User.objects.get(id=user_id)
task_ = FlaggedTask.objects.get(id=task_id)
context = {"message": message, "task": task_}
notification = TemplateEmail(
user=user,
extra_context=context,
text_template="message/notification/support.txt",
html_template="message/notification/support.html",
subject="Support #%d" % task_.id,
)
notification.send(fail_silently=False)
@task(name="muckrock.message.tasks.notify_project_contributor")
def notify_project_contributor(user_id, project_id, added_by_id):
"""Notify a user that they were added as a contributor to a project."""
# pylint: disable=import-outside-toplevel
from muckrock.project.models import Project
user = User.objects.get(id=user_id)
project = Project.objects.get(id=project_id)
added_by = User.objects.get(id=added_by_id)
context = {"project": project, "added_by": added_by}
notification = TemplateEmail(
user=user,
extra_context=context,
text_template="message/notification/project.txt",
html_template="message/notification/project.html",
subject="Added to a project",
)
notification.send(fail_silently=False)
@task(name="muckrock.message.tasks.slack")
def slack(payload):
"""Send a Slack notification using the provided payload."""
try:
notification = SlackNotification(payload)
notification.send(fail_silently=False)
except RequestException as exc:
slack.retry(
countdown=2 ** slack.request.retries * 30 + randint(0, 30),
args=[payload],
exc=exc,
)
|
agpl-3.0
| -8,486,624,507,151,663,000
| 33.966463
| 91
| 0.671026
| false
| 3.852536
| false
| false
| false
|
Connectomics-Classes/hackwicket-silverbacks
|
postProcessScript/src/postPocess.py
|
1
|
1940
|
#!/usr/bin/python
#imports
import numpy as np
import cv
import cv2
import sys
import glob
from mayavi import mlab
from mayavi.mlab import *
def morphOps(probMap):
workableMap = probMap.copy()
#apply erosion & dilation filtering, rids photo of stray vescicle detections
workableMap = eroDilFilter(workableMap, 5, 3) #4 and 1 experimentally determined
#restrict array to include only above 70% confidence
restrictedArr = cv2.inRange(workableMap,.7, 1)
#change all remaining values to 100
restrictedArr[restrictedArr > 0] = 100
return restrictedArr
def loadNpyVolume():
print 'loading files from data directory...'
numpyVolume = []
#for all of the numpy arrays in the current directory
for numpyArr in glob.glob('../data/*.np[yz]'):
probMap = np.load(numpyArr)
#if the numpy is 3d
if(len(probMap.shape) == 3):
#split and add sub numpys to volume
#reorder parameters for looping
np.rollaxis(probMap, 2)
for subMap in probMap:
#add all subArrs in the 3d npy to the volume
print subMap.shape
numpyVolume.append(subMap)
#if the numpy is 2d
elif(len(probMap.shape) == 2):
numpyVolume.append(probMap)
#if the numpy doesnt make sense
else:
print 'Error: Npy data format not recognized'
return numpyVolume
def eroDilFilter(array,ero, dil):
kernel = np.ones((ero, ero), np.uint8)
erodedArr = cv2.erode(array,kernel)
kernel = np.ones((dil,dil), np.uint8)
dilatedArr = cv2.dilate(array, kernel)
return dilatedArr
#load the numpy into the program
numpyVolume = loadNpyVolume()
#instantiate list of array for display
stackableList = []
#perform morph ops to clean up data
print 'cleaning up data...'
for probMap in numpyVolume:
displayArr = morphOps(probMap)
#add array to display list
stackableList.append(displayArr)
#stitch arrays together for display
print 'generating 3d volume...'
finalVolume = np.dstack(stackableList)
#display arrays
mlab.contour3d(finalVolume)
mlab.show()
|
apache-2.0
| -1,765,742,468,419,555,000
| 27.115942
| 81
| 0.744845
| false
| 3.055118
| false
| false
| false
|
ethanrublee/ecto-release
|
python/ecto/__init__.py
|
1
|
7057
|
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import platform, sys
#import inspect
class EctoCellBase(object):
pass
def cell_getitem(self, *args, **kwargs):
if len(args) == 1 and type(args[0]) == slice:
return __getitem_slice__(self.__impl, args[0])
if len(args) == 1 and type(args[0]) == tuple:
return __getitem_tuple__(self.__impl, args[0])
if len(args) == 1 and type(args[0]) == list:
return __getitem_list__(self.__impl, args[0])
return __getitem_str__(self.__impl, args[0])
def cellinit(cpptype):
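    # Builds the __init__ for a generated cell class: construct the wrapped C++ cell, declare its
    # parameters and I/O, and apply keyword arguments as parameter values (or a strand).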
def impl(self, *args, **kwargs):
if len(args) > 1:
raise RuntimeError("Too many positional args: only one allowed, representing cell instance name")
e = lookup(cpptype)
c = self.__impl = e.construct()
if len(args) == 1:
self.__impl.name(args[0])
e.declare_params(self.__impl.params)
# c.construct(args, kwargs)
#print "c=", c
self.inputs = c.inputs
self.outputs = c.outputs
self.params = c.params
for k, v in kwargs.iteritems():
if k == 'strand':
self.__impl._set_strand(v)
elif isinstance(v, _cell_cpp):
setattr(self.params, k, v.__impl)
else:
setattr(self.params, k, v)
# print "now:", getattr(self.params, k)
e.declare_io(self.params, self.inputs, self.outputs)
try:
self.__impl.verify_params()
except ecto.EctoException as e:
print >>sys.stderr, cpptype
raise type(e)('\nCell Type: %s\nCell Name: %s\nWhat:\n%s'%(cpptype,self.__impl.name(),str(e)))
# self.params.get('k') = v
return impl
def cell_print_tendrils(tendril):
s = ""
for x in tendril:
try:
value = str(x.data().get())
except TypeError, e:
value = "[unprintable]"
s += " - " + x.key() + " [%s]" % x.data().type_name
if x.data().required:
s += " REQUIRED"
if x.data().has_default:
s += " default = " + value
s += "\n"
docstr = str(x.data().doc)
doclines = docstr.splitlines()
if doclines :
for docline in doclines:
s += " " + docline + "\n"
s += "\n"
return s
@classmethod
def cell_inspect(self, *args, **kwargs):
c = self.__factory()
c.declare_params()
c.declare_io()
return c
def cell_process(self):
return self.__impl.process()
def cell_configure(self):
return self.__impl.configure()
def cell_name(self):
return self.__impl.name()
def cell_typename(self):
return self.__impl.typename()
def cell_doc(short_doc, c):
    doc = short_doc + "\n\n"
params = cell_print_tendrils(c.params)
inputs = cell_print_tendrils(c.inputs)
outputs = cell_print_tendrils(c.outputs)
if(params):
doc += "Parameters:\n%s"%params
if(inputs):
doc += "Inputs:\n%s"%inputs
if(outputs):
doc += "Outputs:\n%s"%outputs
return doc
def postregister(cellname, cpptypename, short_doc, inmodule):
e = lookup(cpptypename)
c = e.construct()
c.declare_params()
c.declare_io()
thistype = type(cellname, (_cell_cpp,),
dict(__doc__ = cell_doc(short_doc,c),
__module__ = inmodule.__name__,
inputs = c.inputs,
outputs = c.outputs,
params = c.params,
type = c.typename,
short_doc = short_doc,
__init__ = cellinit(cpptypename),
__getitem__ = cell_getitem,
inspect = cell_inspect,
process = cell_process,
configure = cell_configure,
name = cell_name,
type_name = cell_typename,
__factory = e.construct,
__looks_like_a_cell__ = True
))
inmodule.__dict__[cellname] = thistype
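# A hedged usage sketch (cell and type names are hypothetical): a C++ cell
# registered under the type string 'ecto_test::Add' could be exposed here via
#   postregister('Add', 'ecto_test::Add', 'Adds two inputs.', sys.modules[__name__])
# after which ``Add`` behaves like a normal Python class exposing .inputs,
# .outputs and .params, and accepting parameter keyword arguments on construction.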
if platform.system().startswith('freebsd'):
# C++ modules are extremely fragile when loaded with RTLD_LOCAL,
# which is what Python uses on FreeBSD by default, and maybe other
# systems. Convince it to use RTLD_GLOBAL.
# See thread by Abrahams et al:
# http://mail.python.org/pipermail/python-dev/2002-May/024074.html
sys.setdlopenflags(0x102)
def load_pybindings(name, path):
"""
Merges python bindings from shared library 'name' into module 'name'.
Use when you have a directory structure::
lib/
foo.so
foo/
__init__.py
something.py
    Here, inside ``foo/__init__.py``, call ``load_pybindings(__name__, __path__)``.
    This assumes that the first entry in the list ``__path__`` is where
    you want the wrapped classes to merge to.
"""
import imp
m = imp.load_dynamic(name, path[0] + ".so") #TODO this is only going to work on unix...
thismod = sys.modules[name]
for (k,v) in m.__dict__.items():
if not k.startswith("_"):
thismod.__dict__[k] = v
load_pybindings(__name__, __path__)
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from doc import *
from cell import *
from blackbox import *
import test
#
# temporary backwards compat measures
#
schedulers.Threadpool = schedulers.Multithreaded
|
bsd-3-clause
| -9,032,442,430,647,433,000
| 32.604762
| 110
| 0.586652
| false
| 3.858393
| false
| false
| false
|
excid3/keryx
|
keryx/unwrapt/Download.py
|
1
|
5779
|
# Unwrapt - cross-platform package system emulator
# Copyright (C) 2010 Chris Oliver <chris@excid3.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import urllib
import httplib
import urlparse
from datetime import datetime
from utils import format_number
#TODO: Add resume support: http://code.activestate.com/recipes/83208-resuming-download-of-a-file/
class InvalidCredentials(Exception):
"""
Exception raised if the proxy credentials are invalid
"""
pass
class ProxyOpener(urllib.FancyURLopener):
"""
Class for handling proxy credentials
"""
def __init__(self, proxy={}, usr=None, pwd=None):
urllib.FancyURLopener.__init__(self, proxy)
self.count = 0
self.proxy = proxy
self.usr = usr
self.pwd = pwd
def prompt_user_passwd(self, host, realm):
"""
Override the FancyURLopener prompt and simply return what was given
Raise an error if there is a problem
"""
self.count += 1
if self.count > 1:
raise InvalidCredentials, "Unable to authenticate to proxy"
return (self.usr, self.pwd)
def textprogress(display, current, total):
"""
Download progress in terminal
"""
percentage = current/float(total) * 100
sys.stdout.write("\r%-56.56s %3i%% [%5sB / %5sB]" % \
(display,
percentage,
format_number(current),
format_number(total)))
if percentage == 100:
sys.stdout.write("\n")
# This makes sure the cursor ends up on the far right
# Without this the cursor constantly jumps around
sys.stdout.flush()
def download_url(url, filename, display=None, progress=textprogress, proxy={}, username=None, password=None):
"""
    Downloads url to filename on disk, resuming a partial download when possible
"""
if not display:
display = url.rsplit("/", 1)[1]
# Do we already have a file to continue off of?
# modified determines whether the file is outdated or not based on headers
modified = None
downloaded = 0
if os.path.exists(filename):
modified = datetime.utcfromtimestamp(os.stat(filename).st_mtime)
downloaded = os.path.getsize(filename)
# Open up a temporary connection to see if the file we have downloaded
# is still usable (based on modification date)
# format meanings are located http://docs.python.org/library/time.html
opener = ProxyOpener(proxy, username, password)
headers = opener.open(url).headers
if modified and "Last-Modified" in headers:
dt = datetime.strptime(headers["Last-Modified"],
"%a, %d %b %Y %H:%M:%S %Z")
# File is too old so we delete the old file
if modified < dt:
#logging.debug("OLD FILE")
#print "OLD FILE"
downloaded = 0
os.remove(filename)
# Test existing filesize compared to length of download
if "Content-Length" in headers:
length = int(headers["Content-Length"])
# File already downloaded?
if downloaded == length:
progress("Hit: %s" % display, length, length)
return
# File corrupted?
elif downloaded > length:
downloaded = 0
os.remove(filename)
# Open up the real connection for downloading
opener = ProxyOpener(proxy, username, password)
if downloaded:
opener.addheader("Range", "bytes=%s-" % str(downloaded))
page = opener.open(url)
# The file range must have matched the download size
if not "Content-Length" in page.headers:
progress("Hit: %s" % display, downloaded, downloaded)
return
# Finish downloading the file
length = int(page.headers["Content-Length"]) + downloaded
f = open(filename, "ab")
while 1:
data = page.read(8192)
if not data:
break
downloaded += len(data)
f.write(data)
progress(display, downloaded, length)
f.close()
page.close()
return
##Check for Valid URL based on the HTTP response code
def httpExists(url):
host, path = urlparse.urlsplit(url)[1:3]
found = False
connection = httplib.HTTPConnection(host) ## Make HTTPConnection Object
try:
connection.request("HEAD", path)
responseOb = connection.getresponse() ## Grab HTTPResponse Object
if responseOb.status == 200:
found = True
except:
pass
return found
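# Hedged example (URL illustrative): check that a URL answers a HEAD request
# with status 200 before attempting the download.
#   if httpExists("http://archive.ubuntu.com/ubuntu/dists/stable/Release"):
#       download_url("http://archive.ubuntu.com/ubuntu/dists/stable/Release", "Release")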
if __name__ == "__main__":
# Successful proxy usage
#download_url("http://launchpad.net/keryx/stable/0.92/+download/keryx_0.92.4.zip",
# "keryx.zip")
#proxy={"http": "http://tank:3128"},
#username="excid3", password="password")
download_url("http://dl.google.com/linux/chrome/deb/dists/stable/main/binary-amd64/Packages.gz", "google.gz")
download_url("http://linux.dropbox.com/ubuntu/dists/maverick/main/binary-amd64/Packages.gz", "dropbox.gz")
|
gpl-3.0
| -3,711,110,002,626,798,000
| 30.407609
| 113
| 0.622599
| false
| 4.127857
| false
| false
| false
|
trevisanj/f311
|
f311/collaboration.py
|
1
|
8609
|
"""
Collaboration-related routines
1) Class Catalog -- resources to retrieve File* classes by different criteria
2) Script utilities: collect scripts (standalone applications) across collaborator packages
"""
from collections import OrderedDict
import importlib
import a99
import os
import glob
import copy
__all__ = [
"COLLABORATORS_C", "COLLABORATORS_S",
"classes_txt", "classes_bin", "classes_sp", "classes_file",
"get_suitable_vis_classes", "get_suitable_vis_list_classes",
"get_scripts_path", "get_programs_dict",
"EXTERNAL_COLLABORATORS"
]
# List of Python packages to be considered "external collaborators"
#
# These packages may contribute with:
# - scripts
# - DataFile subclasses
# - Vis subclasses
EXTERNAL_COLLABORATORS = ["pyfant", "aosss", "convmolworks", "ariastro"]
# List of **classes** collaborators packages (**change to add**)
#
COLLABORATORS_C = ["f311"]+EXTERNAL_COLLABORATORS
# List of **script** collaborator packages to look for scripts (**change to add**)
#
__F311 = ["f311"]+["f311."+x for x in a99.get_subpackages_names(os.path.split(__file__)[0])]
COLLABORATORS_S = __F311+EXTERNAL_COLLABORATORS
# ** **** ****** **** ****** ****
# ** ** ****** ****** ** ** ****** ****** ** ** ****** ******
# **** **** **** **** **** ****
#
# Class catalog-related routines
def get_suitable_vis_classes(obj):
"""Retuns a list of Vis classes that can handle obj."""
ret = []
for class_ in classes_vis():
if isinstance(obj, class_.input_classes):
ret.append(class_)
return ret
def get_suitable_vis_list_classes(objs):
"""Retuns a list of VisList classes that can handle a list of objects."""
from f311 import explorer as ex
ret = []
for class_ in classes_vis():
if isinstance(class_, ex.VisList):
flag_can = True
for obj in objs:
if not isinstance(obj, class_.item_input_classes):
flag_can = False
break
if flag_can:
ret.append(class_)
return ret
def classes_txt():
"""Classes to consider when attempts to load a text file (see load_any_file())"""
if __flag_first:
__setup()
return _classes_txt
def classes_bin():
"""Classes to consider when attempts to load a binary file (see load_any_file())"""
if __flag_first:
__setup()
return _classes_bin
def classes_sp():
"""Classes to consider when attempts to load a spectrum file (see load_spectrum())"""
if __flag_first:
__setup()
return _classes_sp
def classes_file(flag_leaf=False):
"""All known File* classes
Args:
flag_leaf: returns only classes that do not have subclasses
("leaf" nodes as in a class tree graph)
"""
if __flag_first:
__setup()
if not flag_leaf:
return _classes_file
return [cls for cls in _classes_file if cls not in _classes_file_superclass]
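# Hedged usage sketch (assumes these helpers are re-exported at the package
# level, as suggested by __all__ above; the resulting lists depend on which
# collaborator packages are installed):
#   import f311
#   leaf_classes = f311.classes_file(flag_leaf=True)  # concrete File* types only
#   text_classes = f311.classes_txt()                 # candidates for text files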
def classes_vis():
"""All known Vis* classes"""
if __flag_first:
__setup()
return _classes_vis
def _collect_classes(m):
"""
Adds entries to _classes_*
Args:
m: module object that must contain the following sub-modules: datatypes, vis
"""
from f311 import filetypes as ft
from f311 import explorer as ex
def _extend(classes, newclasses):
"""Filters out classes already present in list.
This shouldn't be necessary, but collaborators may accidentally import already loaded
classes into the datatypes namespace"""
classes.extend([class_ for class_ in newclasses if class_ not in classes])
# classes.extend(newclasses)
file_classes = [class_ for class_ in a99.get_classes_in_module(m, ft.DataFile) if class_.flag_collect]
    # Classes to consider when attempting to load a text file (see load_any_file())
_extend(_classes_txt, [class_ for class_ in file_classes if class_.flag_txt])
    # Classes to consider when attempting to load a binary file (see load_any_file())
_extend(_classes_bin, [class_ for class_ in file_classes if not class_.flag_txt])
    # Classes to consider when attempting to load a spectrum file (see load_spectrum())
_extend(_classes_sp, [class_ for class_ in file_classes if issubclass(class_, ft.FileSpectrum)])
    # All known File* classes
_extend(_classes_file, file_classes)
    # All known Vis* classes
_extend(_classes_vis, a99.get_classes_in_module(m, ex.Vis))
global _classes_file_superclass
_classes_file_superclass = [cls.__bases__[0] for cls in _classes_file]
# # List of classes representing all file formats either read or written
# ====================================================================
_classes_txt = []
_classes_bin = []
_classes_sp = []
_classes_file = []
_classes_file_superclass = [] # superclasses of items in _classes_file
_classes_vis = []
__flag_first = True
__collaborators = OrderedDict()
def __setup():
"""Will be executed in the first time someone calls classes_*() """
global __collaborators, __flag_first
import f311
__flag_first = False
for pkgname in f311.COLLABORATORS_C:
try:
pkg = importlib.import_module(pkgname)
a99.get_python_logger().info("Imported collaborator package '{}'".format(pkgname))
try:
if hasattr(pkg, "_setup_filetypes"):
pkg._setup_filetypes()
else:
_collect_classes(pkg)
__collaborators[pkgname] = pkg
except:
a99.get_python_logger().exception(
"Actually, package '{}' gave error".format(pkgname))
raise
except:
a99.get_python_logger().warning("Failed to import package '{}".format(pkgname))
# raise
# ** **** ****** **** ****** ****
# ** ** ****** ****** ** ** ****** ****** ** ** ****** ******
# **** **** **** **** **** ****
#
# Scripts-related routines
def get_scripts_path(packagename):
"""**Convention** Returns full path to scripts directory"""
return os.path.join(packagename, "scripts")
# {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...}
# keys in COLLABORATORS_S
__programs_dict = None
def _get_programs_dict():
"""
Builds and returns programs dictionary
This will have to import the packages in COLLABORATORS_S in order to get their absolute path.
Returns:
dictionary: {"packagename": [ExeInfo0, ...], ...}
"packagename" examples: "f311.explorer", "numpy"
"""
global __programs_dict
if __programs_dict is not None:
return __programs_dict
d = __programs_dict = OrderedDict()
for pkgname in COLLABORATORS_S:
try:
package = importlib.import_module(pkgname)
except ImportError:
# I think it is better to be silent when a collaborator package is not installed
continue
path_ = os.path.join(os.path.split(package.__file__)[0], "scripts")
bulk = a99.get_exe_info(path_, flag_protected=True)
d[pkgname] = {"description": a99.get_obj_doc0(package), "exeinfo": bulk}
return __programs_dict
def get_programs_dict(pkgname_only=None, flag_protected=False):
"""
    Scans COLLABORATORS_S packages for scripts, optionally filtering according to the arguments passed
Args:
pkgname_only: name of single package within COLLABORATORS_S
flag_protected: include scripts starting with "_"?
Returns:
dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...}
"""
___ret = _get_programs_dict()
__ret = ___ret if pkgname_only is None else OrderedDict(((pkgname_only, ___ret[pkgname_only]),))
if flag_protected:
_ret = __ret
else:
_ret = copy.deepcopy(__ret)
for value in _ret.values():
value["exeinfo"] = [exeinfo for exeinfo in value["exeinfo"] if not exeinfo.filename.startswith("_")]
    # Removes packages that may have been left with no scripts after filtering
ret = _ret if pkgname_only is None and flag_protected is None else \
OrderedDict(((key, value) for key, value in _ret.items() if len(value["exeinfo"]) > 0))
return ret
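# Hedged usage sketch (package name illustrative; ExeInfo exposes .filename,
# as used in the filtering above):
#   progs = get_programs_dict(pkgname_only="f311.explorer")
#   for pkgname, info in progs.items():
#       print(pkgname, info["description"], [e.filename for e in info["exeinfo"]])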
|
gpl-3.0
| 7,911,966,800,209,908,000
| 30.534799
| 112
| 0.588454
| false
| 3.790841
| false
| false
| false
|
Onirik79/aaritmud
|
src/enums/MONTH.py
|
1
|
1699
|
# -*- coding: utf-8 -*-
"""
Enumeration of the months of an RPG year.
"""
from src.element import EnumElement, finalize_enumeration
#-------------------------------------------------------------------------------
name = __name__[__name__.rfind(".")+1 : ]
elements = []
cycle_on_last = True
#-------------------------------------------------------------------------------
class MonthElement(EnumElement):
def __init__(self, name, description=""):
super(MonthElement, self).__init__(name, description)
    #- End of Initialization -
#-------------------------------------------------------------------------------
NONE = MonthElement("Nessuno")
ONE = MonthElement("[white]Inverno del Lupo[close]", "dell'[white]Inverno del Lupo[close]")
TWO = MonthElement("[cyan]Gigante di Ghiaccio[close]", "del [cyan]Gigante di Ghiaccio[close]")
THREE = MonthElement("[blue]Arcano Passato[close]", "dell'[blue]Arcano Passato[close]")
FOUR = MonthElement("[green]Natura[close]", "della [green]Natura[close]")
FIVE = MonthElement("[red]Grande Lotta[close]", "della [red]Grande Lotta[close]")
SIX = MonthElement("[red]Dragone[close]", "del [red]Dragone[close]")
SEVEN = MonthElement("[red]Battaglia[close]", "della [red]Battaglia[close]")
EIGHT = MonthElement("[dimgray]Lunghe Ombre[close]", "delle [dimgray]Lunghe Ombre[close]")
NINE = MonthElement("[blue]Antica Oscurità[close]", "dell'[blue]Antica Oscurità[close]")
TEN = MonthElement("[dimgray]Grande Male[close]", "del [dimgray]Grande Male[close]")
#-------------------------------------------------------------------------------
finalize_enumeration(__name__)
|
gpl-2.0
| 2,097,785,702,521,239,300
| 40.341463
| 96
| 0.533923
| false
| 3.516598
| false
| false
| false
|
priseborough/ardupilot
|
Tools/ardupilotwaf/boards.py
|
1
|
22752
|
#!/usr/bin/env python
# encoding: utf-8
from collections import OrderedDict
import sys, os
import waflib
from waflib import Utils
from waflib.Configure import conf
_board_classes = {}
_board = None
class BoardMeta(type):
def __init__(cls, name, bases, dct):
super(BoardMeta, cls).__init__(name, bases, dct)
if 'abstract' not in cls.__dict__:
cls.abstract = False
if cls.abstract:
return
if not hasattr(cls, 'toolchain'):
cls.toolchain = 'native'
board_name = getattr(cls, 'name', name)
if board_name in _board_classes:
raise Exception('board named %s already exists' % board_name)
_board_classes[board_name] = cls
class Board:
abstract = True
def __init__(self):
self.with_uavcan = False
def configure(self, cfg):
cfg.env.TOOLCHAIN = cfg.options.toolchain or self.toolchain
cfg.env.ROMFS_FILES = []
cfg.load('toolchain')
cfg.load('cxx_checks')
env = waflib.ConfigSet.ConfigSet()
self.configure_env(cfg, env)
d = env.get_merged_dict()
# Always prepend so that arguments passed in the command line get
# the priority.
for k, val in d.items():
# Dictionaries (like 'DEFINES') are converted to lists to
# conform to waf conventions.
if isinstance(val, dict):
keys = list(val.keys())
if not isinstance(val, OrderedDict):
keys.sort()
val = ['%s=%s' % (vk, val[vk]) for vk in keys]
if k in cfg.env and isinstance(cfg.env[k], list):
cfg.env.prepend_value(k, val)
else:
cfg.env[k] = val
cfg.ap_common_checks()
cfg.env.prepend_value('INCLUDES', [
cfg.srcnode.find_dir('libraries/AP_Common/missing').abspath()
])
def configure_env(self, cfg, env):
        # Use a dictionary instead of the conventional list for definitions to
        # make it easy to override them. Convert back to list before consumption.
env.DEFINES = {}
env.CFLAGS += [
'-ffunction-sections',
'-fdata-sections',
'-fsigned-char',
'-Wall',
'-Wextra',
'-Wformat',
'-Wpointer-arith',
'-Wcast-align',
'-Wundef',
'-Wno-missing-field-initializers',
'-Wno-unused-parameter',
'-Wno-redundant-decls',
'-Wno-unknown-pragmas',
'-Wno-trigraphs',
'-Werror=shadow',
'-Werror=return-type',
'-Werror=unused-result',
'-Werror=narrowing',
'-Werror=attributes',
]
if cfg.options.enable_scripting:
env.DEFINES.update(
ENABLE_SCRIPTING = 1,
ENABLE_HEAP = 1,
LUA_32BITS = 1,
)
env.ROMFS_FILES += [
('sandbox.lua', 'libraries/AP_Scripting/scripts/sandbox.lua'),
]
env.AP_LIBRARIES += [
'AP_Scripting',
'AP_Scripting/lua/src',
]
env.CXXFLAGS += [
'-DHAL_HAVE_AP_ROMFS_EMBEDDED_H'
]
if cfg.options.scripting_checks:
env.DEFINES.update(
AP_SCRIPTING_CHECKS = 1,
)
if 'clang' in cfg.env.COMPILER_CC:
env.CFLAGS += [
'-fcolor-diagnostics',
'-Wno-gnu-designator',
'-Wno-inconsistent-missing-override',
'-Wno-mismatched-tags',
'-Wno-gnu-variable-sized-type-not-at-end',
]
if cfg.env.DEBUG:
env.CFLAGS += [
'-g',
'-O0',
]
if cfg.options.enable_math_check_indexes:
env.CXXFLAGS += ['-DMATH_CHECK_INDEXES']
env.CXXFLAGS += [
'-std=gnu++11',
'-fdata-sections',
'-ffunction-sections',
'-fno-exceptions',
'-fsigned-char',
'-Wall',
'-Wextra',
'-Wformat',
'-Wpointer-arith',
'-Wcast-align',
'-Wundef',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
'-Wno-reorder',
'-Wno-redundant-decls',
'-Wno-unknown-pragmas',
'-Werror=attributes',
'-Werror=format-security',
'-Werror=enum-compare',
'-Werror=array-bounds',
'-Werror=uninitialized',
'-Werror=init-self',
'-Werror=narrowing',
'-Werror=return-type',
'-Werror=switch',
'-Werror=sign-compare',
'-Werror=type-limits',
'-Werror=unused-result',
'-Werror=shadow',
'-Werror=unused-variable',
'-Wfatal-errors',
'-Wno-trigraphs',
]
if 'clang++' in cfg.env.COMPILER_CXX:
env.CXXFLAGS += [
'-fcolor-diagnostics',
'-Werror=inconsistent-missing-override',
'-Werror=overloaded-virtual',
'-Wno-gnu-designator',
'-Wno-mismatched-tags',
'-Wno-gnu-variable-sized-type-not-at-end',
]
else:
env.CXXFLAGS += [
'-Werror=unused-but-set-variable'
]
if cfg.env.DEBUG:
env.CXXFLAGS += [
'-g',
'-O0',
]
if cfg.env.DEST_OS == 'darwin':
env.LINKFLAGS += [
'-Wl,-dead_strip',
]
else:
env.LINKFLAGS += [
'-Wl,--gc-sections',
]
if self.with_uavcan:
env.AP_LIBRARIES += [
'AP_UAVCAN',
'modules/uavcan/libuavcan/src/**/*.cpp'
]
env.CXXFLAGS += [
'-Wno-error=cast-align',
]
env.DEFINES.update(
UAVCAN_CPP_VERSION = 'UAVCAN_CPP03',
UAVCAN_NO_ASSERTIONS = 1,
UAVCAN_NULLPTR = 'nullptr'
)
env.INCLUDES += [
cfg.srcnode.find_dir('modules/uavcan/libuavcan/include').abspath()
]
# We always want to use PRI format macros
cfg.define('__STDC_FORMAT_MACROS', 1)
def pre_build(self, bld):
'''pre-build hook that gets called before dynamic sources'''
if bld.env.ROMFS_FILES:
self.embed_ROMFS_files(bld)
def build(self, bld):
bld.ap_version_append_str('GIT_VERSION', bld.git_head_hash(short=True))
import time
ltime = time.localtime()
bld.ap_version_append_int('BUILD_DATE_YEAR', ltime.tm_year)
bld.ap_version_append_int('BUILD_DATE_MONTH', ltime.tm_mon)
bld.ap_version_append_int('BUILD_DATE_DAY', ltime.tm_mday)
def embed_ROMFS_files(self, ctx):
'''embed some files using AP_ROMFS'''
import embed
if ctx.env.USE_NUTTX_IOFW:
# use fmuv2_IO_NuttX.bin instead of fmuv2_IO.bin
for i in range(len(ctx.env.ROMFS_FILES)):
(name,filename) = ctx.env.ROMFS_FILES[i]
if name == 'io_firmware.bin':
filename = 'Tools/IO_Firmware/fmuv2_IO_NuttX.bin'
print("Using IO firmware %s" % filename)
ctx.env.ROMFS_FILES[i] = (name,filename);
header = ctx.bldnode.make_node('ap_romfs_embedded.h').abspath()
if not embed.create_embedded_h(header, ctx.env.ROMFS_FILES):
ctx.fatal("Failed to created ap_romfs_embedded.h")
Board = BoardMeta('Board', Board.__bases__, dict(Board.__dict__))
def add_dynamic_boards():
    '''add boards based on existence of hwdef.dat in subdirectories for ChibiOS'''
dirname, dirlist, filenames = next(os.walk('libraries/AP_HAL_ChibiOS/hwdef'))
for d in dirlist:
if d in _board_classes.keys():
continue
hwdef = os.path.join(dirname, d, 'hwdef.dat')
if os.path.exists(hwdef):
newclass = type(d, (chibios,), {'name': d})
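            # Note: creating the class is enough; BoardMeta.__init__ registers
            # it in _board_classes as a side effect, so 'newclass' needs no
            # further use here.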
def get_boards_names():
add_dynamic_boards()
return sorted(list(_board_classes.keys()), key=str.lower)
def get_removed_boards():
'''list of boards which have been removed'''
return sorted(['px4-v1', 'px4-v2', 'px4-v3', 'px4-v4', 'px4-v4pro'])
@conf
def get_board(ctx):
global _board
if not _board:
if not ctx.env.BOARD:
ctx.fatal('BOARD environment variable must be set before first call to get_board()')
if ctx.env.BOARD in get_removed_boards():
ctx.fatal('''
The board target %s has been removed from ArduPilot with the removal of NuttX support and HAL_PX4.
Please use a replacement build as follows:
px4-v2 Use Pixhawk1 build
px4-v3 Use Pixhawk1 or CubeBlack builds
px4-v4 Use Pixracer build
px4-v4pro Use DrotekP3Pro build
''' % ctx.env.BOARD)
boards = _board_classes.keys()
if not ctx.env.BOARD in boards:
ctx.fatal("Invalid board '%s': choices are %s" % (ctx.env.BOARD, ', '.join(sorted(boards, key=str.lower))))
_board = _board_classes[ctx.env.BOARD]()
return _board
# NOTE: Keeping all the board definitions together so we can easily
# identify opportunities to simplify common flags. In the future might
# be worthy to keep board definitions in files of their own.
class sitl(Board):
def configure_env(self, cfg, env):
super(sitl, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD = 'HAL_BOARD_SITL',
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_NONE',
AP_SCRIPTING_CHECKS = 1, # SITL should always do runtime scripting checks
)
env.CXXFLAGS += [
'-Werror=float-equal'
]
if not cfg.env.DEBUG:
env.CXXFLAGS += [
'-O3',
]
env.LIB += [
'm',
]
cfg.check_librt(env)
env.LINKFLAGS += ['-pthread',]
env.AP_LIBRARIES += [
'AP_HAL_SITL',
'SITL',
]
if cfg.options.enable_sfml:
if not cfg.check_SFML(env):
cfg.fatal("Failed to find SFML libraries")
env.CXXFLAGS += ['-DWITH_SITL_OSD','-DOSD_ENABLED=ENABLED','-DHAL_HAVE_AP_ROMFS_EMBEDDED_H']
import fnmatch
for f in os.listdir('libraries/AP_OSD/fonts'):
if fnmatch.fnmatch(f, "font*bin"):
env.ROMFS_FILES += [(f,'libraries/AP_OSD/fonts/'+f)]
if cfg.options.enable_sfml_audio:
if not cfg.check_SFML_Audio(env):
cfg.fatal("Failed to find SFML Audio libraries")
env.CXXFLAGS += ['-DWITH_SITL_TONEALARM']
if cfg.options.sitl_flash_storage:
env.CXXFLAGS += ['-DSTORAGE_USE_FLASH=1']
if cfg.env.DEST_OS == 'cygwin':
env.LIB += [
'winmm',
]
if Utils.unversioned_sys_platform() == 'cygwin':
env.CXXFLAGS += ['-DCYGWIN_BUILD']
if 'clang++' in cfg.env.COMPILER_CXX:
print("Disabling SLP for clang++")
env.CXXFLAGS += [
'-fno-slp-vectorize' # compiler bug when trying to use SLP
]
class chibios(Board):
abstract = True
toolchain = 'arm-none-eabi'
def configure_env(self, cfg, env):
super(chibios, self).configure_env(cfg, env)
cfg.load('chibios')
env.BOARD = self.name
env.DEFINES.update(
CONFIG_HAL_BOARD = 'HAL_BOARD_CHIBIOS',
HAVE_OCLOEXEC = 0,
HAVE_STD_NULLPTR_T = 0,
)
env.AP_LIBRARIES += [
'AP_HAL_ChibiOS',
]
# make board name available for USB IDs
env.CHIBIOS_BOARD_NAME = 'HAL_BOARD_NAME="%s"' % self.name
env.CFLAGS += cfg.env.CPU_FLAGS + [
'-Wno-cast-align',
'-Wlogical-op',
'-Wframe-larger-than=1300',
'-fsingle-precision-constant',
'-Wno-attributes',
'-Wno-error=double-promotion',
'-Wno-error=missing-declarations',
'-Wno-error=float-equal',
'-Wno-error=undef',
'-Wno-error=cpp',
'-fno-exceptions',
'-Wall',
'-Wextra',
'-Wno-sign-compare',
'-Wfloat-equal',
'-Wpointer-arith',
'-Wmissing-declarations',
'-Wno-unused-parameter',
'-Werror=array-bounds',
'-Wfatal-errors',
'-Werror=uninitialized',
'-Werror=init-self',
'-Wframe-larger-than=1024',
'-Werror=unused-but-set-variable',
'-Wno-missing-field-initializers',
'-Wno-trigraphs',
'-fno-strict-aliasing',
'-fomit-frame-pointer',
'-falign-functions=16',
'-ffunction-sections',
'-fdata-sections',
'-fno-strength-reduce',
'-fno-builtin-printf',
'-fno-builtin-fprintf',
'-fno-builtin-vprintf',
'-fno-builtin-vfprintf',
'-fno-builtin-puts',
'-mno-thumb-interwork',
'-mthumb',
'--specs=nano.specs',
'-specs=nosys.specs',
'-DCHIBIOS_BOARD_NAME="%s"' % self.name,
]
env.CXXFLAGS += env.CFLAGS + [
'-fno-rtti',
'-fno-threadsafe-statics',
]
if Utils.unversioned_sys_platform() == 'cygwin':
env.CXXFLAGS += ['-DCYGWIN_BUILD']
bldnode = cfg.bldnode.make_node(self.name)
env.BUILDROOT = bldnode.make_node('').abspath()
env.LINKFLAGS = cfg.env.CPU_FLAGS + [
'-fomit-frame-pointer',
'-falign-functions=16',
'-ffunction-sections',
'-fdata-sections',
'-u_port_lock',
'-u_port_unlock',
'-u_exit',
'-u_kill',
'-u_getpid',
'-u_errno',
'-uchThdExit',
'-fno-common',
'-nostartfiles',
'-mno-thumb-interwork',
'-mthumb',
'-specs=nano.specs',
'-specs=nosys.specs',
'-L%s' % env.BUILDROOT,
'-L%s' % cfg.srcnode.make_node('modules/ChibiOS/os/common/startup/ARMCMx/compilers/GCC/ld/').abspath(),
'-L%s' % cfg.srcnode.make_node('libraries/AP_HAL_ChibiOS/hwdef/common/').abspath(),
'-Wl,--gc-sections,--no-warn-mismatch,--library-path=/ld,--script=ldscript.ld,--defsym=__process_stack_size__=%s,--defsym=__main_stack_size__=%s' % (cfg.env.PROCESS_STACK, cfg.env.MAIN_STACK)
]
if cfg.env.DEBUG:
env.CFLAGS += [
'-gdwarf-4',
'-g3',
]
env.LINKFLAGS += [
'-gdwarf-4',
'-g3',
]
if cfg.env.ENABLE_ASSERTS:
cfg.msg("Enabling ChibiOS asserts", "yes")
env.CFLAGS += [ '-DHAL_CHIBIOS_ENABLE_ASSERTS' ]
env.CXXFLAGS += [ '-DHAL_CHIBIOS_ENABLE_ASSERTS' ]
else:
cfg.msg("Enabling ChibiOS asserts", "no")
env.LIB += ['gcc', 'm']
env.GIT_SUBMODULES += [
'ChibiOS',
]
try:
import intelhex
env.HAVE_INTEL_HEX = True
cfg.msg("Checking for intelhex module:", 'OK')
except Exception:
cfg.msg("Checking for intelhex module:", 'disabled', color='YELLOW')
env.HAVE_INTEL_HEX = False
def build(self, bld):
super(chibios, self).build(bld)
bld.ap_version_append_str('CHIBIOS_GIT_VERSION', bld.git_submodule_head_hash('ChibiOS', short=True))
bld.load('chibios')
def pre_build(self, bld):
'''pre-build hook that gets called before dynamic sources'''
super(chibios, self).pre_build(bld)
from waflib.Context import load_tool
module = load_tool('chibios', [], with_sys_path=True)
fun = getattr(module, 'pre_build', None)
if fun:
fun(bld)
class linux(Board):
def configure_env(self, cfg, env):
super(linux, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD = 'HAL_BOARD_LINUX',
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NONE',
)
if not cfg.env.DEBUG:
env.CXXFLAGS += [
'-O3',
]
env.LIB += [
'm',
]
cfg.check_librt(env)
cfg.check_lttng(env)
cfg.check_libdl(env)
cfg.check_libiio(env)
env.LINKFLAGS += ['-pthread',]
env.AP_LIBRARIES += [
'AP_HAL_Linux',
]
if self.with_uavcan:
cfg.define('UAVCAN_EXCEPTIONS', 0)
if cfg.options.apstatedir:
cfg.define('AP_STATEDIR', cfg.options.apstatedir)
def build(self, bld):
super(linux, self).build(bld)
if bld.options.upload:
waflib.Options.commands.append('rsync')
# Avoid infinite recursion
bld.options.upload = False
class erleboard(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(erleboard, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBOARD',
)
class navio(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(navio, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO',
)
class navio2(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(navio2, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_NAVIO2',
)
class edge(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_uavcan = True
def configure_env(self, cfg, env):
super(edge, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_EDGE',
)
class zynq(linux):
toolchain = 'arm-xilinx-linux-gnueabi'
def configure_env(self, cfg, env):
super(zynq, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ZYNQ',
)
class ocpoc_zynq(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(ocpoc_zynq, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_OCPOC_ZYNQ',
)
class bbbmini(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_uavcan = True
def configure_env(self, cfg, env):
super(bbbmini, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BBBMINI',
)
class blue(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_uavcan = True
def configure_env(self, cfg, env):
super(blue, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BLUE',
)
class pocket(linux):
toolchain = 'arm-linux-gnueabihf'
def __init__(self):
self.with_uavcan = True
def configure_env(self, cfg, env):
super(pocket, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_POCKET',
)
class pxf(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(pxf, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXF',
)
class bebop(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(bebop, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BEBOP',
)
class disco(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(disco, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DISCO',
)
class erlebrain2(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(erlebrain2, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_ERLEBRAIN2',
)
class bhat(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(bhat, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_BH',
)
class dark(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(dark, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_DARK',
)
class pxfmini(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(pxfmini, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_PXFMINI',
)
class aero(linux):
def __init__(self):
self.with_uavcan = True
def configure_env(self, cfg, env):
super(aero, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_AERO',
)
class rst_zynq(linux):
toolchain = 'arm-linux-gnueabihf'
def configure_env(self, cfg, env):
super(rst_zynq, self).configure_env(cfg, env)
env.DEFINES.update(
CONFIG_HAL_BOARD_SUBTYPE = 'HAL_BOARD_SUBTYPE_LINUX_RST_ZYNQ',
)
class SITL_static(sitl):
def configure_env(self, cfg, env):
super(SITL_static, self).configure_env(cfg, env)
cfg.env.STATIC_LINKING = True
class SITL_x86_64_linux_gnu(SITL_static):
toolchain = 'x86_64-linux-gnu'
class SITL_arm_linux_gnueabihf(SITL_static):
toolchain = 'arm-linux-gnueabihf'
|
gpl-3.0
| -5,554,313,772,015,345,000
| 28.936842
| 203
| 0.533887
| false
| 3.517081
| true
| false
| false
|
cemagg/sucem-fem
|
examples/2D_waveguide/eigenmode/TM_driver.py
|
1
|
3597
|
## Copyright (C) 2011 Stellenbosch University
##
## This file is part of SUCEM.
##
## SUCEM is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## SUCEM is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with SUCEM. If not, see <http://www.gnu.org/licenses/>.
##
## Contact: cemagga@gmail.com
# Authors:
# Evan Lezar <mail@evanlezar.com>
"""A simple 2D eigenproblem which calculates the TM modes of a square guide.
Note that this is done by modelling the Magnetic field and as such no dirichlet BCs are used.
Only natural boundary conditions."""
import sys
import numpy as N
import os
import dolfin as dol
sys.path.insert(0, '../../../')
from sucemfem.ProblemConfigurations.EMVectorWaveEigenproblem import EigenProblem
from sucemfem.ProblemConfigurations.EMVectorWaveEigenproblem import DefaultEigenSolver
from sucemfem.Consts import c0
del sys.path[0]
script_path = os.path.dirname(__file__)
# Load the mesh and the material region markers
mesh = dol.UnitSquare ( 5, 5 )
a = 1.0
b = 1.0
mesh.coordinates()[:,0] = a*mesh.coordinates()[:,0]
mesh.coordinates()[:,1] = b*mesh.coordinates()[:,1]
# Use 4th order basis functions
order = 4
# Set up the eigen problem
ep = EigenProblem()
ep.set_mesh(mesh)
ep.set_basis_order(order)
ep.init_problem()
# Set up eigen problem solver where sigma is the shift to use in the
# shift-invert process
sigma = 1.5
es = DefaultEigenSolver()
es.set_eigenproblem(ep)
es.set_sigma(sigma)
# Solve the eigenproblem
eigs_w, eigs_v = es.solve_problem(10)
# Output the results
#res = N.array(sorted(eigs_w)[0:])
res = N.array(sorted(1/eigs_w+sigma)[0:]) #HAVE TO CORRECT FOR THE SPECTRUM SHIFT
res = N.sqrt(res)/N.pi
def k_mnl ( abd, m, n, l, normalize = False):
"""
Return the analytical cutoff wavenumber for a resonant cavity with dimensions specified in the tuple abd
    @param abd: a 2- or 3-tuple of the cavity dimensions in meters
@param m: the index of the mode in the x direction
@param n: the index of the mode in the y direction
@param l: the index of the mode in the z direction
@param normalize: divide by the factor \pi
"""
if len(abd) == 3:
a, b, d = abd
elif len(abd) == 2:
a, b = abd
d = 1;
l = 0;
root = N.sqrt ( (m/a)**2 + (n/b)**2 + (l/d)**2 )
if normalize:
return root
else:
return root*N.pi
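# Hedged worked example for the unit square (a = b = 1): the TM11 mode gives
#   k_mnl((1.0, 1.0), 1, 1, 0, normalize=True)  -> sqrt(1 + 1)  ~= 1.414
#   k_mnl((1.0, 1.0), 1, 1, 0)                  -> pi*sqrt(2)   ~= 4.443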
print '\nreference:'
steps = 5
abd = (a, b)
ids = []
values = []
for m in range(steps):
for n in range(steps):
l = 0
i = (m,n)
if i.count(0) == 0:
ids.append((m,n,l))
values.append(k_mnl ( abd, m, n, l, True ))
import warnings
warnings.simplefilter("ignore", N.ComplexWarning)
r = 0;
errors = N.zeros_like(res)
print "mnl, analytical, calculated, relative error"
for i in N.argsort(values).tolist():
if r < len(res):
errors[r] = N.linalg.norm( res[r] - values[i])/N.linalg.norm( values[i] )
print "%d%d%d, " % (
ids[i]), "%9.3f, %10.3f, %.2e" % ( values[i], res[r], errors[r] )
r += 1
else:
break;
N.testing.assert_array_almost_equal( errors, N.zeros_like(res), 4 )
|
gpl-3.0
| 1,550,712,025,160,284,200
| 28.491803
| 108
| 0.66055
| false
| 3.136007
| false
| false
| false
|
datagrok/python-misc
|
datagrok/misc/cli.py
|
1
|
2465
|
"""Framework for creating tools that employ a robust command-line interface."""
from __future__ import absolute_import
import os
import shutil
# TODO: interface to datagrok.ansicolor ?
class CLIManager(object):
"""Captures the boilerplate involved in making a decent command-line
interface for a multi-function script.
Think about the interface to cvs, svn, git, etc.
Example:
class MyApp(CLIManager):
def cmd_who(self):
"Tells who"
pass
            def cmd_what(self):
"Tells what"
pass
...
if __name__=='__main__':
import sys
# Create an instance with arg0
App = MyApp(sys.argv.pop(0))
# Call the instance with command line arguments
App(*sys.argv)
"""
def __init__(self, argv0):
self.argv0 = os.path.basename(argv0)
def __call__(self, *args):
args = list(args)
command = '_default'
if len(args):
command = args.pop(0)
if command == '--help':
command = 'help'
getattr(self, 'cmd_%s' % command, self._cmd_not_found(command))(*args)
if len(args) == 1 and command != 'help':
print
print "See '%s help' for more information." % self.argv0
def _cmd_not_found(self, command):
def error():
print "%s: '%s' is not a known command. see '%s help'" % (self.argv0, command, self.argv0)
return error
def cmd_help(self, *args):
"""Prints the usage information for this program or a command"""
if len(args) == 0:
print "usage: %s COMMAND [ARGS]" % self.argv0
print
print "The most commonly used commands are:"
for command in [x[len('cmd_'):] for x in dir(self) if x.startswith('cmd_') and not x.startswith('cmd__')]:
print " %-10s %s" % (command, getattr(self, 'cmd_' + command).__doc__.splitlines()[0])
print
print "See '%s help COMMAND' for more information on a specific command." % self.argv0
else:
command = list(args).pop(0)
cmd = getattr(self, 'cmd_%s' % command, None)
if cmd:
print "usage: %s %s [ARGS]" % (self.argv0, command)
print cmd.__doc__
else:
self._cmd_not_found(command)(self)
cmd__default = cmd_help
|
agpl-3.0
| -7,054,265,450,523,929,000
| 33.236111
| 118
| 0.533469
| false
| 3.975806
| false
| false
| false
|
andrewyang96/RacetrackGenerator
|
catmullrom.py
|
1
|
1950
|
# Source: http://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
# http://people.wku.edu/qi.li/teaching/446/cg14_curve_surface.pdf
import numpy as np
from utils import distance
def CatmullRomSpline(P0, P1, P2, P3, nPoints=100):
"""
P0, P1, P2, and P3 should be (x,y) point pairs that define the Catmull-Rom spline.
nPoints is the number of points to include in this curve segment.
"""
# Convert the points to numpy so that we can do array multiplication
P0, P1, P2, P3 = map(np.array, [P0, P1, P2, P3])
# Calculate t0 to t4
alpha = 0.5
def tj(ti, Pi, Pj):
xi, yi = Pi
xj, yj = Pj
return ( ( (xj-xi)**2 + (yj-yi)**2 )**0.5 )**alpha + ti
    # Knot sequence for the centripetal parameterization (alpha = 0.5), as in
    # the Wikipedia reference cited above.
    t0 = 0
    t1 = tj(t0, P0, P1)
    t2 = tj(t1, P1, P2)
    t3 = tj(t2, P2, P3)
# Only calculate points between P1 and P2
t = np.linspace(t1,t2,nPoints)
# Reshape so that we can multiply by the points P0 to P3
# and get a point for each value of t.
t = t.reshape(len(t),1)
A1 = (t1-t)/(t1-t0)*P0 + (t-t0)/(t1-t0)*P1
A2 = (t2-t)/(t2-t1)*P1 + (t-t1)/(t2-t1)*P2
A3 = (t3-t)/(t3-t2)*P2 + (t-t2)/(t3-t2)*P3
B1 = (t2-t)/(t2-t0)*A1 + (t-t0)/(t2-t0)*A2
B2 = (t3-t)/(t3-t1)*A2 + (t-t1)/(t3-t1)*A3
C = (t2-t)/(t2-t1)*B1 + (t-t1)/(t2-t1)*B2
return C
def CatmullRomLoop(loop, pointsPerUnitDist=1.):
"""
Calculate Catmull Rom for a list of points, named loop, with loop[0] == loop[-1].
"""
if len(loop) < 4:
raise ValueError("Loop must have at least 4 points in it")
ret = []
# Add extra control points to ends
loop = [loop[-2],] + loop + [loop[1],]
# Produce coords for loop
for i in xrange(len(loop)-3):
numPoints = int(distance(loop[i+1], loop[i+2]) * pointsPerUnitDist)
ret.append(CatmullRomSpline(loop[i], loop[i+1], loop[i+2], loop[i+3], nPoints=numPoints))
ret = [tuple(coords) for seg in ret for coords in seg]
return ret
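# Hedged usage sketch (points illustrative; utils.distance is assumed Euclidean):
#   square_loop = [(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]
#   smooth = CatmullRomLoop(square_loop, pointsPerUnitDist=2.0)
#   # 'smooth' is a flat list of (x, y) tuples tracing the closed curve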
|
mit
| 1,019,315,799,465,238,700
| 33.210526
| 97
| 0.587692
| false
| 2.443609
| false
| false
| false
|
VaclavDedik/infinispan-py
|
tests/unit/test_utils.py
|
1
|
1300
|
# -*- coding: utf-8 -*-
import pytest
from infinispan import utils
from infinispan.hotrod import TimeUnits
class TestUtils(object):
def test_from_pretty_time(self):
assert utils.from_pretty_time('10s') == (10, TimeUnits.SECONDS)
assert utils.from_pretty_time('10ms') == (10, TimeUnits.MILISECONDS)
assert utils.from_pretty_time('10ns') == (10, TimeUnits.NANOSECONDS)
assert utils.from_pretty_time('10us') == (10, TimeUnits.MICROSECONDS)
assert utils.from_pretty_time('10m') == (10, TimeUnits.MINUTES)
assert utils.from_pretty_time('10h') == (10, TimeUnits.HOURS)
assert utils.from_pretty_time('10d') == (10, TimeUnits.DAYS)
assert utils.from_pretty_time('inf') == (None, TimeUnits.INFINITE)
assert utils.from_pretty_time('def') == (None, TimeUnits.DEFAULT)
def test_from_pretty_time_invalid_format(self):
with pytest.raises(ValueError):
utils.from_pretty_time('10')
with pytest.raises(ValueError):
utils.from_pretty_time('s')
with pytest.raises(ValueError):
utils.from_pretty_time('10S')
with pytest.raises(ValueError):
utils.from_pretty_time('10s1')
with pytest.raises(ValueError):
utils.from_pretty_time('10ss')
|
mit
| -5,564,530,746,362,472,000
| 39.625
| 77
| 0.641538
| false
| 3.457447
| true
| false
| false
|
stormi/tsunami
|
src/secondaires/magie/types/parchemin.py
|
1
|
4405
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type Parchemin."""
from primaires.interpreteur.editeur.uniligne import Uniligne
from bases.objet.attribut import Attribut
from primaires.objet.types.base import BaseType
class Parchemin(BaseType):
"""Type d'objet: parchemin.
"""
nom_type = "parchemin"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
self._cle_sort = ""
self.charges = 1
self.etendre_editeur("s", "sort", Uniligne, self, "cle_sort")
self.etendre_editeur("c", "charges", Uniligne, self, "charges")
def _get_cle_sort(self):
return self._cle_sort
def _set_cle_sort(self, sort):
sorts = [sort.cle for sort in type(self).importeur.magie.sorts.values()]
if sort in sorts:
self._cle_sort = sort
cle_sort = property(_get_cle_sort, _set_cle_sort)
@property
def sort(self):
"""Renvoie le sort de ce parchemin."""
if self.cle_sort:
return type(self).importeur.magie.sorts[self.cle_sort]
else:
return None
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes"""
sort = enveloppes["s"]
sort.apercu = "{objet.cle_sort}"
sort.prompt = "Clé du sort : "
sort.aide_courte = \
"Entrez la |ent|clé|ff| du sort contenu dans ce parchemin. Il " \
"va sans dire que le sort\nen question doit être déjà créé. " \
"Entrez |cmd|/|ff| pour revenir à la fenêtre parente.\n\n" \
"Sort actuel : {objet.cle_sort}"
sort.type = str
charges = enveloppes["c"]
charges.apercu = "{objet.charges}"
charges.prompt = "Nombre de charges : "
charges.aide_courte = \
"Entrez le |ent|nombre|ff| de charges du parchemin ; ce nombre " \
"correspond à la quantité\nde sorts que l'on peut lancer avant " \
"que le parchemin soit inutilisable.\n" \
"Entrez |cmd|/|ff| pour revenir à la fenêtre parente.\n\n" \
"Charges actuelles : {objet.charges}"
charges.type = int
@staticmethod
def regarder(objet, personnage):
"""Le personnage regarde l'objet."""
msg = BaseType.regarder(objet, personnage)
if getattr(objet, "sort", False):
de = "de"
if objet.sort.nom[0] in ["a", "e", "i", "o", "u", "y"]:
de = "d'"
if objet.charges > 0:
s = "s" if objet.charges > 1 else ""
msg += "\nCe parchemin contient " + str(objet.charges)
msg += " charge" + s + " du sort " + de + " " + objet.sort.nom
msg += "."
else:
msg += "\nCe parchemin ne contient plus aucune charge."
return msg
|
bsd-3-clause
| -7,939,619,277,976,388,000
| 39.666667
| 80
| 0.636612
| false
| 3.536232
| false
| false
| false
|
ericdill/bluesky
|
bluesky/simple_scans.py
|
1
|
14733
|
"""
These "scans" bundle a Message generator with an instance of the RunEngine,
combining two separate concepts -- instructions and execution -- into one
object. This makes the interface less flexible and somewhat less "Pythonic"
but more condensed.
This module is meant to be run in a namespace where several global
variables have been defined. If some variables are left undefined, the
associated scans will be not usable.
DETS # list of detectors
MASTER_DET # detector to use for tw
MASTER_DET_FIELD # detector field to use for tw
H_MOTOR
K_MOTOR
L_MOTOR
TH_MOTOR
TTH_MOTOR
TEMP_CONTROLLER
Page numbers in the code comments refer to the SPEC manual at
http://www.certif.com/downloads/css_docs/spec_man.pdf
"""
from inspect import signature
import matplotlib.pyplot as plt
from bluesky import scans
from bluesky.callbacks import LiveTable, LivePlot, LiveRaster, _get_obj_fields
from bluesky.scientific_callbacks import PeakStats
from boltons.iterutils import chunked
from bluesky.global_state import gs
from bluesky.utils import normalize_subs_input, Subs, DefaultSubs
from collections import defaultdict
from itertools import filterfalse, chain
# ## Factory functions acting a shim between scans and callbacks ###
def table_from_motors(scan):
"Setup a LiveTable by inspecting a scan and gs."
# > 1 motor
return LiveTable(list(scan.motors) + gs.TABLE_COLS)
def table_from_motor(scan):
"Setup a LiveTable by inspecting a scan and gs."
# 1 motor
return LiveTable([scan.motor] + gs.TABLE_COLS)
def table_gs_only(scan):
"Setup a LiveTable by inspecting a scan and gs."
# no motors
return LiveTable(gs.TABLE_COLS)
def plot_first_motor(scan):
"Setup a LivePlot by inspecting a scan and gs."
fig_name = 'BlueSky: {} v {}'.format(list(scan.motors)[0].name, gs.PLOT_Y)
fig = plt.figure(fig_name)
if not gs.OVERPLOT:
fig.clf()
return LivePlot(gs.PLOT_Y, list(scan.motors)[0].name, fig=fig)
def plot_motor(scan):
"Setup a LivePlot by inspecting a scan and gs."
fig_name = 'BlueSky: {} v {}'.format(scan.motor.name, gs.PLOT_Y)
fig = plt.figure(fig_name)
if not gs.OVERPLOT:
fig.clf()
return LivePlot(gs.PLOT_Y, scan.motor.name, fig=fig)
def raster(scan):
"Set up a LiveRaster by inspect a scan and gs."
if len(scan.shape) != 2:
return None
# first motor is 'slow' -> Y axis
ylab, xlab = _get_obj_fields(scan.motors)
# shape goes in (rr, cc)
# extents go in (x, y)
return LiveRaster(scan.shape, gs.MASTER_DET_FIELD, xlabel=xlab,
ylabel=ylab, extent=list(chain(*scan.extents[::-1])))
def peakstats_first_motor(scan):
"Set up peakstats"
ps = PeakStats(_get_obj_fields([list(scan.motors)[0]])[0],
gs.MASTER_DET_FIELD, edge_count=3)
gs.PS = ps
return ps
def peakstats(scan):
"Set up peakstats"
ps = PeakStats(_get_obj_fields([scan.motor])[0],
gs.MASTER_DET_FIELD, edge_count=3)
gs.PS = ps
return ps
class _BundledScan:
default_subs = DefaultSubs({})
default_sub_factories = DefaultSubs({})
# These are set to the defaults at init time.
subs = Subs({})
sub_factories = Subs({})
def __init__(self):
# subs and sub_factories can be set individually per instance
self.subs = dict(self.default_subs)
self.sub_factories = dict(self.default_sub_factories)
self.params = list(signature(self.scan_class).parameters.keys())
self.configuration = {}
self.flyers = []
def __call__(self, *args, subs=None, sub_factories=None, **kwargs):
scan_kwargs = dict()
# Any kwargs valid for the scan go to the scan, not the RE.
for k, v in kwargs.copy().items():
if k in self.params:
scan_kwargs[k] = kwargs.pop(k)
from bluesky.global_state import gs
RE_params = list(signature(gs.RE.__call__).parameters.keys())
if set(RE_params) & set(self.params):
raise AssertionError("The names of the scan's arguments clash "
"the RunEngine arguments. Use different "
"names. Avoid: {0}".format(RE_params))
global_dets = gs.DETS if gs.DETS is not None else []
self.scan = self.scan_class(global_dets, *args, **scan_kwargs)
# Combine subs passed as args and subs set up in subs attribute.
_subs = defaultdict(list)
_update_lists(_subs, normalize_subs_input(subs))
_update_lists(_subs, normalize_subs_input(self.subs))
# Create a sub from each sub_factory.
_update_lists(_subs, _run_factories(sub_factories, self.scan))
_update_lists(_subs, _run_factories(self.sub_factories, self.scan))
# Set up scan attributes.
self.scan.configuration = self.configuration
global_flyers = gs.FLYERS if gs.FLYERS is not None else []
self.scan.flyers = list(set(list(self.flyers) + list(global_flyers)))
# Any remainging kwargs go the RE. To be safe, no args are passed
# to RE; RE args effectively become keyword-only arguments.
return gs.RE(self.scan, _subs, **kwargs)
def _update_lists(out, inp):
"""Extends dictionary `out` lists with those in `inp`
Assumes dictionaries where all values are lists
"""
for k, v in inp.items():
try:
out[k].extend(v)
except KeyError:
out[k] = list(v)
def _run_factories(factories, scan):
'''Run sub factory functions for a scan
Factory functions should return lists, which will be added onto the
subscription key (e.g., 'all' or 'start') specified in the factory
definition.
If the factory function returns None, the list will not be modified.
'''
factories = normalize_subs_input(factories)
out = {k: list(filterfalse(lambda x: x is None,
(sf(scan) for sf in v)))
for k, v in factories.items()}
gs._SECRET_STASH = out
return out
# ## Mid-level base classes ###
# These are responsible for popping off the time arg and adjusting the
# interval. SPEC counts "bonds;" idiomatic Python counts "sites."
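# For example, an AbsScan instance invoked SPEC-style as
#   ascan(my_motor, 0, 10, 5, 1)   # 5 intervals, count 1 s per point
# runs a 6-point scan below, because intervals is converted to intervals + 1.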
class _OuterProductScan(_BundledScan):
default_sub_factories = DefaultSubs({'all': [table_from_motors]})
def __call__(self, *args, time=None, subs=None, **kwargs):
args = list(args)
if len(args) % 4 == 1:
if time is None:
time = args.pop(-1)
else:
raise ValueError("wrong number of positional arguments")
original_times = _set_acquire_time(time)
for i, _ in enumerate(chunked(list(args), 4)):
# intervals -> intervals + 1
args[4*i + 3] += 1
# never snake; SPEC doesn't know how
if i != 0:
args.insert(4*(i + 1), False)
result = super().__call__(*args, subs=subs, **kwargs)
_unset_acquire_time(original_times)
return result
class _InnerProductScan(_BundledScan):
default_sub_factories = DefaultSubs(
{'all': [table_from_motors, plot_first_motor,
peakstats_first_motor]})
def __call__(self, *args, time=None, subs=None, **kwargs):
args = list(args)
if len(args) % 3 == 2:
if time is None:
time = args.pop(-1)
else:
raise ValueError("wrong number of positional arguments")
intervals = args.pop(-1) + 1
original_times = _set_acquire_time(time)
result = super().__call__(intervals, *args, subs=subs, **kwargs)
_unset_acquire_time(original_times)
return result
class _StepScan(_BundledScan):
default_sub_factories = DefaultSubs(
{'all': [table_from_motor, plot_motor,
peakstats]})
def __call__(self, motor, start, finish, intervals, time=None,
subs=None, **kwargs):
"""Invoke the scan
Parameters
----------
motor
start : number
The start point of the motion
finish : number
The finish point of the motion
intervals : int
The number of steps between `start` and `finish`
time : number
The acquire time of the detector(s)?
subs : dict
The temporary subscriptions to add to **this scan only**. These
subscriptions are **not** persistent
"""
original_times = _set_acquire_time(time)
result = super().__call__(motor, start, finish, intervals + 1,
subs=subs, **kwargs)
_unset_acquire_time(original_times)
return result
class _HardcodedMotorStepScan(_BundledScan):
# Subclasses must define self.motor as a property.
default_sub_factories = DefaultSubs(
{'all': [table_from_motor, plot_motor]})
def __call__(self, start, finish, intervals, time=None, subs=None,
**kwargs):
original_times = _set_acquire_time(time)
result = super().__call__(self.motor, start, finish,
intervals + 1, subs=subs, **kwargs)
_unset_acquire_time(original_times)
return result
### Counts (p. 140) ###
class Count(_BundledScan):
"ct"
scan_class = scans.Count
default_sub_factories = DefaultSubs({'all': [table_gs_only]})
def __call__(self, time=None, subs=None, **kwargs):
original_times = _set_acquire_time(time)
result = super().__call__(subs=subs, **kwargs)
_unset_acquire_time(original_times)
return result
### Motor Scans (p. 146) ###
class AbsScan(_StepScan):
"ascan"
scan_class = scans.AbsScan
class OuterProductAbsScan(_OuterProductScan):
"mesh"
default_sub_factories = DefaultSubs({'all': [table_from_motors, raster]})
scan_class = scans.OuterProductAbsScan
class InnerProductAbsScan(_InnerProductScan):
"a2scan, a3scan, etc."
scan_class = scans.InnerProductAbsScan
class DeltaScan(_StepScan):
"dscan (also known as lup)"
scan_class = scans.DeltaScan
class InnerProductDeltaScan(_InnerProductScan):
"d2scan, d3scan, etc."
scan_class = scans.InnerProductDeltaScan
class ThetaTwoThetaScan(_InnerProductScan):
"th2th"
scan_class = scans.InnerProductDeltaScan
def __call__(self, start, finish, intervals, time=None, **kwargs):
TTH_MOTOR = gs.TTH_MOTOR
TH_MOTOR = gs.TH_MOTOR
original_times = _set_acquire_time(time)
result = super().__call__(TTH_MOTOR, start, finish,
TH_MOTOR, start/2, finish/2,
intervals, time, **kwargs)
        _unset_acquire_time(original_times)
        return result
### Temperature Scans (p. 148) ###
class _TemperatureScan(_HardcodedMotorStepScan):
def __call__(self, start, finish, intervals, time=None, sleep=0,
**kwargs):
self._sleep = sleep
original_times = _set_acquire_time(time)
self.motor.settle_time = sleep
result = super().__call__(start, finish, intervals + 1, **kwargs)
_unset_acquire_time(original_times)
return result
@property
def motor(self):
from bluesky.global_state import gs
return gs.TEMP_CONTROLLER
class AbsTemperatureScan(_TemperatureScan):
"tscan"
scan_class = scans.AbsScan
class DeltaTemperatureScan(_TemperatureScan):
"dtscan"
scan_class = scans.DeltaScan
### Basic Reciprocal Space Scans (p. 147) ###
class HScan(_HardcodedMotorStepScan):
"hscan"
scan_class = scans.AbsScan
@property
def motor(self):
from bluesky.global_state import gs
return gs.H_MOTOR
class KScan(_HardcodedMotorStepScan):
"kscan"
scan_class = scans.AbsScan
@property
def motor(self):
from bluesky.global_state import gs
return gs.K_MOTOR
class LScan(_HardcodedMotorStepScan):
"lscan"
scan_class = scans.AbsScan
@property
def motor(self):
from bluesky.global_state import gs
return gs.L_MOTOR
class OuterProductHKLScan(_BundledScan):
"hklmesh"
scan_class = scans.OuterProductAbsScan
def __call__(self, Q1, start1, finish1, intervals1, Q2, start2, finish2,
intervals2, time=None, **kwargs):
# To be clear, like all other functions in this module, this
        # eye-gouging API is for compatibility with SPEC, not the author's
# idea of good Python code.
from bluesky.global_state import gs
H_MOTOR = gs.H_MOTOR
K_MOTOR = gs.K_MOTOR
L_MOTOR = gs.L_MOTOR
original_times = _set_acquire_time(time)
_motor_mapping = {'H': H_MOTOR, 'K': K_MOTOR, 'L': L_MOTOR}
motor1 = _motor_mapping[Q1]
motor2 = _motor_mapping[Q2]
# Note that intervals + 1 is handled in the base class.
result = super().__call__(motor1, start1, finish1, intervals1,
motor2, start2, finish2, intervals2,
**kwargs)
_unset_acquire_time(original_times)
return result
class InnerProductHKLScan(_BundledScan):
"hklscan"
scan_class = scans.InnerProductAbsScan
def __call__(self, start_h, finish_h, start_k, finish_k, start_l,
finish_l, intervals, time=None, **kwargs):
from bluesky.global_state import gs
H_MOTOR = gs.H_MOTOR
K_MOTOR = gs.K_MOTOR
L_MOTOR = gs.L_MOTOR
original_times = _set_acquire_time(time)
result = super().__call__(intervals, start_h, finish_h, start_k,
finish_k, start_l, finish_l, **kwargs)
        _unset_acquire_time(original_times)
        return result
### Special Reciprocal Space Scans ###
# TODO:
# klradial
# hlradial
# hkradial
# klcircle
# hlcircle
# hkcircle
class Tweak(_BundledScan):
"tw"
scan_class = scans.Tweak
    def __call__(self, motor, step, **kwargs):
from bluesky.global_state import gs
MASTER_DET = gs.MASTER_DET
MASTER_DET_FIELD = gs.MASTER_DET_FIELD
return super().__call__(MASTER_DET, MASTER_DET_FIELD, motor,
step, **kwargs)
def _set_acquire_time(time):
from bluesky.global_state import gs
if time is None:
time = gs.COUNT_TIME
original_times = {}
for det in gs.DETS:
if hasattr(det, 'count_time'):
original_times[det] = det.count_time
det.count_time = time
return original_times
def _unset_acquire_time(original_times):
for det, time in original_times.items():
det.count_time = time
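# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The instance and
# motor names below are assumptions for illustration only; in practice the
# wrappers are instantiated and wired up elsewhere in bluesky's SPEC-style API.
#
#     ascan = AbsScan()                      # SPEC "ascan"
#     dscan = DeltaScan()                    # SPEC "dscan" / "lup"
#     ct = Count()                           # SPEC "ct"
#
#     ascan(some_motor, 0, 10, 10, time=1)   # 11 points, 1 s acquire time
#     dscan(some_motor, -1, 1, 20)           # scan relative to current position
#     ct(time=0.5)                           # one count with the active detectors
# ---------------------------------------------------------------------------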
|
bsd-3-clause
| 1,619,961,103,741,633,800
| 30.280255
| 78
| 0.614539
| false
| 3.628818
| false
| false
| false
|
walterbender/turtleconfusion
|
TurtleArt/tatype.py
|
1
|
15982
|
# Copyright (c) 2013 Marion Zepf
# Copyright (c) 2014 Walter Bender
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" type system for Primitives and their arguments """
import ast
from tablock import Media
from taconstants import (Color, ColorObj, CONSTANTS, Vector)
class Type(object):
""" A type in the type hierarchy. """
def __init__(self, constant_name, value):
""" constant_name -- the name of the constant that points to this Type
object
value -- an arbitrary integer that is different from the values of
all other Types. The order of the integers doesn't matter. """
self.constant_name = constant_name
self.value = value
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Type):
return False
return self.value == other.value
def __str__(self):
return str(self.constant_name)
__repr__ = __str__
class TypeDisjunction(tuple, Type):
""" Disjunction of two or more Types (from the type hierarchy) """
def __init__(self, iterable):
        # The items are already stored by tuple.__new__; rebinding the local
        # name 'self' here has no effect on the instance.
        self = tuple(iterable)
def __str__(self):
s = ["("]
for disj in self:
s.append(str(disj))
s.append(" or ")
s.pop()
s.append(")")
return "".join(s)
# individual types
TYPE_OBJECT = Type('TYPE_OBJECT', 0)
TYPE_CHAR = Type('TYPE_CHAR', 1)
TYPE_COLOR = Type('TYPE_COLOR', 2)
TYPE_FLOAT = Type('TYPE_FLOAT', 3)
TYPE_INT = Type('TYPE_INT', 4)
TYPE_BOOL = Type('TYPE_BOOL', 5)
# shortcut to avoid a TypeDisjunction between TYPE_FLOAT and TYPE_INT
TYPE_NUMBER = Type('TYPE_NUMBER', 6)
TYPE_NUMERIC_STRING = Type('TYPE_NUMERIC_STRING', 7)
TYPE_BOX = Type('TYPE_BOX', 8) # special type for the unknown content of a box
TYPE_STRING = Type('TYPE_STRING', 9)
TYPE_MEDIA = Type('TYPE_MEDIA', 10)
# An array of numbers used by the food plugin et al.
TYPE_VECTOR = Type('TYPE_VECTOR', 11)
# groups/ classes of types
TYPES_NUMERIC = (TYPE_FLOAT, TYPE_INT, TYPE_NUMBER)
BOX_AST = ast.Name(id='BOX', ctx=ast.Load)
ACTION_AST = ast.Name(id='ACTION', ctx=ast.Load)
def get_type(x):
""" Return the most specific type in the type hierarchy that applies to x
and a boolean indicating whether x is an AST. If the type cannot be
determined, return TYPE_OBJECT as the type. """
# non-AST types
if isinstance(x, (int, long)):
return (TYPE_INT, False)
elif isinstance(x, float):
return (TYPE_FLOAT, False)
elif isinstance(x, basestring):
if len(x) == 1:
return (TYPE_CHAR, False)
try:
float(x)
except ValueError:
return (TYPE_STRING, False)
else:
return (TYPE_NUMERIC_STRING, False)
elif isinstance(x, Color):
return (TYPE_COLOR, False)
elif isinstance(x, Media):
return (TYPE_MEDIA, False)
elif isinstance(x, Vector):
return (TYPE_VECTOR, False)
elif hasattr(x, "return_type"):
return (x.return_type, False)
# AST types
elif isinstance(x, ast.Num):
return (get_type(x.n)[0], True)
elif isinstance(x, ast.Str):
return (get_type(x.s)[0], True)
elif isinstance(x, ast.Name):
try:
# we need to have imported CONSTANTS for this to work
value = eval(x.id)
except NameError:
return (TYPE_OBJECT, True)
else:
return (get_type(value)[0], True)
elif isinstance(x, ast.Subscript):
if x.value == BOX_AST:
return (TYPE_BOX, True)
elif isinstance(x, ast.Call):
if isinstance(x.func, ast.Name):
if x.func.id == 'float':
return (TYPE_FLOAT, True)
elif x.func.id in ('int', 'ord'):
return (TYPE_INT, True)
elif x.func.id == 'chr':
return (TYPE_CHAR, True)
elif x.func.id in ('repr', 'str', 'unicode'):
return (TYPE_STRING, True)
elif x.func.id == 'Color':
return (TYPE_COLOR, True)
elif x.func.id == 'Media':
return (TYPE_MEDIA, True)
# unary operands never change the type of their argument
elif isinstance(x, ast.UnaryOp):
if issubclass(x.op, ast.Not):
# 'not' always returns a boolean
return (TYPE_BOOL, True)
else:
return get_type(x.operand)
# boolean and comparison operators always return a boolean
if isinstance(x, (ast.BoolOp, ast.Compare)):
return (TYPE_BOOL, True)
# other binary operators
elif isinstance(x, ast.BinOp):
type_left = get_type(x.left)[0]
type_right = get_type(x.right)[0]
if type_left == TYPE_STRING or type_right == TYPE_STRING:
return (TYPE_STRING, True)
if type_left == type_right == TYPE_INT:
return (TYPE_INT, True)
else:
return (TYPE_FLOAT, True)
return (TYPE_OBJECT, isinstance(x, ast.AST))
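# Editor's note: a few illustrative expectations for get_type(), assuming the
# Python 2 environment this module targets (int/long/basestring). This is a
# sketch, not part of the original test suite.
#
#     get_type(5)          -> (TYPE_INT, False)
#     get_type(5.0)        -> (TYPE_FLOAT, False)
#     get_type('x')        -> (TYPE_CHAR, False)
#     get_type('3.14')     -> (TYPE_NUMERIC_STRING, False)
#     get_type('hello')    -> (TYPE_STRING, False)
#     get_type(ast.Num(5)) -> (TYPE_INT, True)   # AST node, so second element is True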
def is_instancemethod(method):
# TODO how to access the type `instancemethod` directly?
return type(method).__name__ == "instancemethod"
def is_bound_method(method):
return ((is_instancemethod(method) and method.im_self is not None) or
(hasattr(method, '__self__') and method.__self__ is not None))
def is_staticmethod(method):
# TODO how to access the type `staticmethod` directly?
return type(method).__name__ == "staticmethod"
def identity(x):
return x
TYPE_CONVERTERS = {
# Type hierarchy: If there is a converter A -> B, then A is a subtype of B.
# The converter from A to B is stored under TYPE_CONVERTERS[A][B].
# The relation describing the type hierarchy must be transitive, i.e.
# converting A -> C must yield the same result as converting A -> B -> C.
# TYPE_OBJECT is the supertype of everything.
TYPE_BOX: {
TYPE_COLOR: ColorObj, # FIXME: should be Color.name
TYPE_VECTOR: Vector,
TYPE_FLOAT: float,
TYPE_INT: int,
TYPE_NUMBER: float,
TYPE_STRING: str},
TYPE_CHAR: {
TYPE_INT: ord,
TYPE_STRING: identity},
TYPE_COLOR: {
TYPE_FLOAT: float,
TYPE_INT: int,
TYPE_NUMBER: int,
TYPE_STRING: Color.get_number_string},
TYPE_FLOAT: {
TYPE_INT: int,
TYPE_NUMBER: identity,
TYPE_STRING: str},
TYPE_INT: {
TYPE_FLOAT: float,
TYPE_NUMBER: identity,
TYPE_STRING: str},
TYPE_NUMBER: {
TYPE_FLOAT: float,
TYPE_INT: int,
TYPE_STRING: str},
TYPE_NUMERIC_STRING: {
TYPE_FLOAT: float,
TYPE_STRING: identity}
}
class TATypeError(BaseException):
""" TypeError with the types from the hierarchy, not with Python types """
def __init__(self, bad_value, bad_type=None, req_type=None, message=''):
""" bad_value -- the mis-typed value that caused the error
bad_type -- the type of the bad_value
req_type -- the type that the value was expected to have
message -- short statement about the cause of the error. It is
not shown to the user, but may appear in debugging output. """
self.bad_value = bad_value
self.bad_type = bad_type
self.req_type = req_type
self.message = message
def __str__(self):
msg = []
if self.message:
msg.append(self.message)
msg.append(" (")
msg.append("bad value: ")
msg.append(repr(self.bad_value))
if self.bad_type is not None:
msg.append(", bad type: ")
msg.append(repr(self.bad_type))
if self.req_type is not None:
msg.append(", req type: ")
msg.append(repr(self.req_type))
if self.message:
msg.append(")")
return "".join(msg)
__repr__ = __str__
def get_converter(old_type, new_type):
""" If there is a converter old_type -> new_type, return it. Else return
None. If a chain of converters is necessary, return it as a tuple or
list (starting with the innermost, first-to-apply converter). """
# every type can be converted to TYPE_OBJECT
if new_type == TYPE_OBJECT:
return identity
# every type can be converted to itself
if old_type == new_type:
return identity
# is there a converter for this pair of types?
converters_from_old = TYPE_CONVERTERS.get(old_type)
if converters_from_old is None:
return None
converter = converters_from_old.get(new_type)
if converter is not None:
return converter
else:
# form the transitive closure of all types that old_type can be
# converted to, and look for new_type there
backtrace = converters_from_old.copy()
new_backtrace = backtrace.copy()
break_all = False
while True:
newest_backtrace = {}
for t in new_backtrace:
for new_t in TYPE_CONVERTERS.get(t, {}):
if new_t not in backtrace:
newest_backtrace[new_t] = t
backtrace[new_t] = t
if new_t == new_type:
break_all = True
break
if break_all:
break
if break_all or not newest_backtrace:
break
new_backtrace = newest_backtrace
# use the backtrace to find the path from old_type to new_type
if new_type in backtrace:
converter_chain = []
t = new_type
while t in backtrace and isinstance(backtrace[t], Type):
converter_chain.insert(0, TYPE_CONVERTERS[backtrace[t]][t])
t = backtrace[t]
converter_chain.insert(0, TYPE_CONVERTERS[old_type][t])
return converter_chain
return None
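# Editor's sketch of how the converter lookup behaves, based on the
# TYPE_CONVERTERS table above (not part of the original module):
#
#     get_converter(TYPE_INT, TYPE_INT)    -> identity        (same type)
#     get_converter(TYPE_INT, TYPE_STRING) -> str             (direct table entry)
#     get_converter(TYPE_CHAR, TYPE_FLOAT) -> [ord, float]    (chain via TYPE_INT,
#                                             innermost converter first)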
def convert(x, new_type, old_type=None, converter=None):
""" Convert x to the new type if possible.
old_type -- the type of x. If not given, it is computed. """
if not isinstance(new_type, Type):
raise ValueError('%s is not a type in the type hierarchy'
% (repr(new_type)))
# every type can be converted to TYPE_OBJECT
if new_type == TYPE_OBJECT:
return x
if not isinstance(old_type, Type):
(old_type, is_an_ast) = get_type(x)
else:
is_an_ast = isinstance(x, ast.AST)
# every type can be converted to itself
if old_type == new_type:
return x
# special case: 'box' block (or 'pop' block) as an AST
if is_an_ast and old_type == TYPE_BOX:
new_type_ast = ast.Name(id=new_type.constant_name)
return get_call_ast('convert', [x, new_type_ast], return_type=new_type)
# if the converter is not given, try to find one
if converter is None:
converter = get_converter(old_type, new_type)
if converter is None:
# no converter available
raise TATypeError(
bad_value=x,
bad_type=old_type,
req_type=new_type,
message=(
"found no converter"
" for this type combination"))
def _apply_converter(converter, y):
try:
if is_an_ast:
if converter == identity:
return y
elif is_instancemethod(converter):
func = ast.Attribute(value=y,
attr=converter.im_func.__name__,
ctx=ast.Load)
return get_call_ast(func)
else:
func_name = converter.__name__
return get_call_ast(func_name, [y])
else:
return converter(y)
except BaseException:
raise TATypeError(bad_value=x, bad_type=old_type,
req_type=new_type, message=("error during "
"conversion"))
if isinstance(converter, (list, tuple)):
# apply the converter chain recursively
result = x
for conv in converter:
result = _apply_converter(conv, result)
return result
elif converter is not None:
return _apply_converter(converter, x)
class TypedAST(ast.AST):
@property
def return_type(self):
if self._return_type is None:
return get_type(self.func)[0]
else:
return self._return_type
class TypedCall(ast.Call, TypedAST):
""" Like a Call AST, but with a return type """
def __init__(self, func, args=None, keywords=None, starargs=None,
kwargs=None, return_type=None):
if args is None:
args = []
if keywords is None:
keywords = []
ast.Call.__init__(self, func=func, args=args, keywords=keywords,
starargs=starargs, kwargs=kwargs)
self._return_type = return_type
class TypedSubscript(ast.Subscript, TypedAST):
""" Like a Subscript AST, but with a type """
def __init__(self, value, slice_, ctx=ast.Load, return_type=None):
ast.Subscript.__init__(self, value=value, slice=slice_, ctx=ctx)
self._return_type = return_type
class TypedName(ast.Name, TypedAST):
""" Like a Name AST, but with a type """
def __init__(self, id_, ctx=ast.Load, return_type=None):
ast.Name.__init__(self, id=id_, ctx=ctx)
self._return_type = return_type
def get_call_ast(func_name, args=None, kwargs=None, return_type=None):
""" Return an AST representing the call to a function with the name
func_name, passing it the arguments args (given as a list) and the
keyword arguments kwargs (given as a dictionary).
func_name -- either the name of a callable as a string, or an AST
representing a callable expression
return_type -- if this is not None, return a TypedCall object with this
return type instead """
if args is None:
args = []
# convert keyword argument dict to a list of (key, value) pairs
keywords = []
if kwargs is not None:
for (key, value) in kwargs.iteritems():
keywords.append(ast.keyword(arg=key, value=value))
# get or generate the AST representing the callable
if isinstance(func_name, ast.AST):
func_ast = func_name
else:
func_ast = ast.Name(id=func_name, ctx=ast.Load)
# if no return type is given, return a simple Call AST
if return_type is None:
return ast.Call(func=func_ast, args=args, keywords=keywords,
starargs=None, kwargs=None)
# if a return type is given, return a TypedCall AST
else:
return TypedCall(func=func_ast, args=args, keywords=keywords,
return_type=return_type)
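# Editor's sketch (not part of the original module): get_call_ast builds the
# AST for a call expression. For example,
#
#     node = get_call_ast('float', [ast.Str('1.5')], return_type=TYPE_FLOAT)
#
# yields a TypedCall equivalent to the expression float('1.5'), whose
# .return_type is TYPE_FLOAT so later conversions can reason about it.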
|
mit
| 6,725,969,836,886,066,000
| 33.743478
| 79
| 0.589163
| false
| 3.941307
| false
| false
| false
|
infrae/mobi.devices
|
src/mobi/devices/tests/test_doctest_wurfl_parsing.py
|
1
|
1866
|
# Copyright (c) 2010 Infrae. All rights reserved.
# See also LICENSE.txt.
"""
We will start by initializing the database from wurfl stream.
It should return a tuple (db, index)
>>> from mobi.devices.index.radixtree import NOTSET
>>> from mobi.devices.wurfl.db import initialize_db
>>> db, index = initialize_db(config)
>>> db is not None
True
>>> index #doctest: +ELLIPSIS
<mobi.devices.index.radixtree.RadixTree ...>
Now we'll have a look at what's inside the index.
>>> user_agent = 'Mozilla/5.0 (iPhone; ...'
>>> node, string, pos = index.search(user_agent)
>>> node.value
<class 'mobi.devices.index.radixtree.NOTSET'>
>>> string
u'Mozilla/5.0 (iPhone; '
>>> pos
21
>>> dev_id = node.values().next()
Let's look that up into the database.
>>> from mobi.devices.wurfl.db import Device
>>> device = Device.deserialize(db[dev_id])
>>> device #doctest: +ELLIPSIS
<mobi.devices.wurfl.parser.Device user_agent="Mozilla/5.0 (iPhone; ...
>>> int(device.get_capability('xhtml_support_level'))
4
>>> device.parent_id
u'apple_iphone_ver2'
>>> device.type
<InterfaceClass mobi.interfaces.devices.IAdvancedDeviceType>
>>> device.platform
u'iphone'
"""
import shutil
import os
from mobi.devices.wurfl.parser import Device
data_dir = os.path.join(os.path.dirname(__file__), 'var')
config = {
'var': data_dir
}
def setup(test):
teardown(test)
try:
os.mkdir(data_dir)
except OSError:
pass
def teardown(test):
try:
if Device.db:
Device.db.close()
shutil.rmtree(data_dir)
except:
pass
def test_suite():
import unittest
import doctest
suite = unittest.TestSuite()
suite.addTest(
doctest.DocTestSuite(__name__, setUp=setup, tearDown=teardown))
return suite
|
bsd-3-clause
| 7,810,178,486,091,748,000
| 23.88
| 74
| 0.635048
| false
| 3.417582
| true
| false
| false
|
cedriclaunay/gaffer
|
python/GafferUI/PlugWidget.py
|
1
|
5374
|
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import warnings
import IECore
import Gaffer
import GafferUI
QtGui = GafferUI._qtImport( "QtGui" )
## The PlugWidget combines a LabelPlugValueWidget with a second PlugValueWidget
## suitable for editing the plug.
## \todo This could provide functionality for arbitrary Widgets to be placed
## on the right, which combined with the ability to find a
## PlugWidget given a Plug could be quite useful for many things.
## \todo Remove deprecated label and description capabilities.
class PlugWidget( GafferUI.Widget ) :
def __init__( self, plugOrWidget, label=None, description=None, **kw ) :
GafferUI.Widget.__init__( self, QtGui.QWidget(), **kw )
layout = QtGui.QHBoxLayout()
layout.setContentsMargins( 0, 0, 0, 0 )
layout.setSpacing( 4 )
layout.setSizeConstraint( QtGui.QLayout.SetMinAndMaxSize )
self._qtWidget().setLayout( layout )
if isinstance( plugOrWidget, Gaffer.Plug ) :
self.__valueWidget = GafferUI.PlugValueWidget.create( plugOrWidget )
plug = plugOrWidget
else :
assert( isinstance( plugOrWidget, GafferUI.PlugValueWidget ) or hasattr( plugOrWidget, "plugValueWidget" ) )
self.__valueWidget = plugOrWidget
plug = self.plugValueWidget().getPlug()
self.__label = GafferUI.LabelPlugValueWidget(
plug,
horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right,
verticalAlignment = GafferUI.Label.VerticalAlignment.Top,
)
## \todo Decide how we allow this sort of tweak using the public
# interface. Perhaps we should have a SizeableContainer or something?
self.__label.label()._qtWidget().setFixedWidth( self.labelWidth() )
if label is not None :
warnings.warn(
"The PlugWidget label parameter is deprecated. Use Metadata instead.",
DeprecationWarning,
2
)
self.__label.label().setText( label )
if description is not None :
warnings.warn(
"The PlugWidget description parameter is deprecated. Use Metadata instead.",
DeprecationWarning,
2
)
self.__label.label().setToolTip( description )
layout.addWidget( self.__label._qtWidget() )
layout.addWidget( self.__valueWidget._qtWidget() )
# The plugValueWidget() may have smarter drop behaviour than the labelPlugValueWidget(),
# because it has specialised PlugValueWidget._dropValue(). It's also more meaningful to the
# user if we highlight the plugValueWidget() on dragEnter rather than the label. So we
# forward the dragEnter/dragLeave/drop signals from the labelPlugValueWidget() to the plugValueWidget().
self.__dragEnterConnection = self.__label.dragEnterSignal().connect( 0, Gaffer.WeakMethod( self.__labelDragEnter ) )
self.__dragLeaveConnection = self.__label.dragLeaveSignal().connect( 0, Gaffer.WeakMethod( self.__labelDragLeave ) )
self.__dropConnection = self.__label.dropSignal().connect( 0, Gaffer.WeakMethod( self.__labelDrop ) )
def plugValueWidget( self ) :
if isinstance( self.__valueWidget, GafferUI.PlugValueWidget ) :
return self.__valueWidget
else :
return self.__valueWidget.plugValueWidget()
def labelPlugValueWidget( self ) :
return self.__label
@staticmethod
def labelWidth() :
return 150
def __labelDragEnter( self, label, event ) :
return self.plugValueWidget().dragEnterSignal()( self.plugValueWidget(), event )
def __labelDragLeave( self, label, event ) :
return self.plugValueWidget().dragLeaveSignal()( self.plugValueWidget(), event )
def __labelDrop( self, label, event ) :
return self.plugValueWidget().dropSignal()( self.plugValueWidget(), event )
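# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). `someNode` and the
# plug name are assumptions for illustration; any Gaffer.Plug works.
#
#     plug = someNode["translate"]
#     widget = GafferUI.PlugWidget( plug )             # label plus value editor
#     widget.plugValueWidget()                         # the editing widget
#     widget.labelPlugValueWidget().label().setText( "Translate" )
# ----------------------------------------------------------------------------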
|
bsd-3-clause
| 1,593,743,067,569,437,400
| 38.514706
| 118
| 0.716971
| false
| 4.022455
| false
| false
| false
|
iamdober/training
|
final/task3/task3.py
|
1
|
2189
|
#
# In lecture, we took the bipartite Marvel graph,
# where edges went between characters and the comics
# books they appeared in, and created a weighted graph
# with edges between characters where the weight was the
# number of comic books in which they both appeared.
#
# In this assignment, determine the weights between
# comic book characters by giving the probability
# that a randomly chosen comic book containing one of
# the characters will also contain the other
#
# from marvel import marvel, characters
def get_books_list(bipartiteG, characters):
books = []
for key in bipartiteG:
if key not in characters:
books.append(key)
return books
def calc_prob(bipartiteG, books, a, b):
# books = get_books_list(bipartiteG, characters)
books_total = 0.
books_both = 0.
for book in books:
if a in bipartiteG[book] or b in bipartiteG[book]:
books_total += 1
if a in bipartiteG[book] and b in bipartiteG[book]:
books_both += 1
if not books_both:
return None
return books_both/books_total
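# Editor's worked example (not part of the original assignment code): with the
# bipartite graph used in test() below, 'charA' appears in comicB and comicC,
# and 'charB' in comicB and comicD. Three books contain at least one of them
# and exactly one (comicB) contains both, so calc_prob(...) returns 1/3.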
def create_weighted_graph(bipartiteG, characters):
books = get_books_list(bipartiteG, characters)
G = {}
for i in characters:
G[i] = {}
for charA in characters:
for charB in characters:
if charA != charB:
prob = calc_prob(bipartiteG, books, charA, charB)
G[charA][charB] = prob
G[charB][charA] = prob
return G
######
#
# Test
def test():
bipartiteG = {'charA':{'comicB':1, 'comicC':1},
'charB':{'comicB':1, 'comicD':1},
'charC':{'comicD':1},
'comicB':{'charA':1, 'charB':1},
'comicC':{'charA':1},
'comicD': {'charC':1, 'charB':1}}
G = create_weighted_graph(bipartiteG, ['charA', 'charB', 'charC'])
# three comics contain charA or charB
# charA and charB are together in one of them
assert G['charA']['charB'] == 1.0 / 3
assert G['charA'].get('charA') == None
assert G['charA'].get('charC') == None
# def test2():
# G = create_weighted_graph(marvel, characters)
test()
|
gpl-3.0
| 2,350,563,976,907,062,000
| 29.416667
| 70
| 0.604842
| false
| 3.480127
| false
| false
| false
|
evanbrumley/psmove-restful
|
utils.py
|
1
|
8083
|
import sys, os, time
from threading import Thread
import requests
PSMOVEAPI_BUILD_DIR = os.environ.get('PSMOVEAPI_BUILD_DIR')
if PSMOVEAPI_BUILD_DIR:
sys.path.insert(0, os.environ['PSMOVEAPI_BUILD_DIR'])
import psmove
class Controller(object):
_active = False
_loop_thread = None
controller = None
read_only = False
red = 0
green = 0
blue = 0
ax = 0
ay = 0
az = 0
gx = 0
gy = 0
gz = 0
btn_triangle = False
btn_circle = False
btn_cross = False
btn_square = False
btn_select = False
btn_start = False
btn_move = False
btn_t = False
btn_ps = False
battery = 0
trigger = 0
rumble = 0
def __init__(self, controller, read_only=False):
self.controller = controller
self.read_only = read_only
self.start_loop()
def terminate(self):
self._active = False
if self._loop_thread:
self._loop_thread.join()
def _loop(self):
while(self._active):
if not self.read_only:
self.controller.set_leds(self.red, self.green, self.blue)
self.controller.update_leds()
self.controller.set_rumble(self.rumble)
self.update_state()
time.sleep(0.01)
def start_loop(self):
self._active = True
self._loop_thread = Thread(target=self._loop)
self._loop_thread.daemon = True
self._loop_thread.start()
def update_state(self):
result = self.controller.poll()
if result:
buttons = self.controller.get_buttons()
button_events_on, button_events_off = self.controller.get_button_events()
self.btn_triangle = bool(buttons & psmove.Btn_TRIANGLE)
self.btn_circle = bool(buttons & psmove.Btn_CIRCLE)
self.btn_cross = bool(buttons & psmove.Btn_CROSS)
self.btn_square = bool(buttons & psmove.Btn_SQUARE)
self.btn_select = bool(buttons & psmove.Btn_SELECT)
self.btn_start = bool(buttons & psmove.Btn_START)
self.btn_move = bool(buttons & psmove.Btn_MOVE)
self.btn_t = bool(buttons & psmove.Btn_T)
self.btn_ps = bool(buttons & psmove.Btn_PS)
self.battery = self.controller.get_battery()
self.trigger = self.controller.get_trigger()
self.ax, self.ay, self.az = self.controller.get_accelerometer_frame(psmove.Frame_SecondHalf)
self.gx, self.gy, self.gz = self.controller.get_gyroscope_frame(psmove.Frame_SecondHalf)
def state_as_dict(self):
state_dict = {
'ax': self.ax,
'ay': self.ay,
'az': self.az,
'gx': self.gx,
'gy': self.gy,
'gz': self.gz,
'btn_triangle': self.btn_triangle,
'btn_circle': self.btn_circle,
'btn_cross': self.btn_cross,
'btn_square': self.btn_square,
'btn_select': self.btn_select,
'btn_start': self.btn_start,
'btn_move': self.btn_move,
'btn_t': self.btn_t,
'btn_ps': self.btn_ps,
'battery': self.battery,
'trigger': self.trigger,
'red': self.red,
'green': self.green,
'blue': self.blue,
'rumble': self.rumble,
}
# There's currently no way to get color
# or rumble directly from the controller
if self.read_only:
del state_dict['red']
del state_dict['green']
del state_dict['blue']
del state_dict['rumble']
return state_dict
def set_color(self, red=None, green=None, blue=None):
if red is not None:
self.red = red
if green is not None:
self.green = green
if blue is not None:
self.blue = blue
def set_rumble(self, rumble):
self.rumble = rumble
    def on_btn_triangle(self, fn, *args, **kwargs):
        callback = Callback(fn, *args, **kwargs)
        # The original appended to an undefined module-level list (NameError);
        # keep registered callbacks on the instance instead.
        if not hasattr(self, '_btn_triangle_callbacks'):
            self._btn_triangle_callbacks = []
        self._btn_triangle_callbacks.append(callback)
        return callback
class RemoteController(Controller):
_red = 0
_green = 0
_blue = 0
_rumble = 0
_dirty = True # Default to True so values get cleared on startup
def __init__(self, url):
self.url = url
self.start_loop()
def _loop(self):
while(self._active):
self.update_state()
time.sleep(0.02)
if self._dirty:
self.update_remote_state()
self._dirty = False
def terminate(self):
# Let the loop do its thing until
# we're not dirty any more
while(self._dirty):
time.sleep(0.02)
self._active = False
if self._loop_thread:
self._loop_thread.join()
def update_remote_state(self):
data = {
'red': self.red,
'green': self.green,
'blue': self.blue,
'rumble': self.rumble,
}
try:
response = requests.put(self.url, data)
except requests.ConnectionError:
print "Could not connect to controller at %s" % self.url
self._active = False
return
if response.status_code == 404:
print "Controller not found at %s" % self.url
self._active = False
return
elif not response.ok:
print "Encountered error updating controller: %s (%s)" % (response.status_code, response.reason)
self._active = False
return
@property
def red(self):
return self._red
@red.setter
def red(self, val):
self._red = val
self._dirty = True
@property
def green(self):
return self._green
@green.setter
def green(self, val):
self._green = val
self._dirty = True
@property
def blue(self):
return self._blue
@blue.setter
def blue(self, val):
self._blue = val
self._dirty = True
@property
def rumble(self):
return self._rumble
@rumble.setter
def rumble(self, val):
self._rumble = val
self._dirty = True
def update_state(self):
try:
response = requests.get(self.url)
except requests.ConnectionError:
print "Could not connect to controller at %s" % self.url
self._active = False
return
if response.status_code == 404:
print "Controller not found at %s" % self.url
self._active = False
return
elif not response.ok:
print "Encountered error updating controller: %s (%s)" % (response.status_code, response.reason)
self._active = False
return
result = response.json()
self.btn_triangle = result.get('btn_triangle')
self.btn_circle = result.get('btn_circle')
self.btn_cross = result.get('btn_cross')
self.btn_square = result.get('btn_square')
self.btn_select = result.get('btn_select')
self.btn_start = result.get('btn_start')
self.btn_move = result.get('btn_move')
self.btn_t = result.get('btn_t')
self.btn_ps = result.get('btn_ps')
self.battery = result.get('battery')
self.ax = result.get('ax')
self.ay = result.get('ay')
        self.az = result.get('az')
self.gx = result.get('gx')
self.gy = result.get('gy')
self.gz = result.get('gz')
class Callback(object):
def __init__(self, fn, *args, **kwargs):
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
self.fn(*self.args, **self.kwargs)
def get_controllers(read_only=False):
controllers = [psmove.PSMove(x) for x in range(psmove.count_connected())]
return [Controller(c, read_only) for c in controllers if c.connection_type == psmove.Conn_Bluetooth]
def get_remote_controller(url):
return RemoteController(url)
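# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It assumes at least
# one PS Move controller paired over Bluetooth, or a RESTful endpoint; the URL
# below is hypothetical.
#
#     controllers = get_controllers()
#     if controllers:
#         c = controllers[0]
#         c.set_color(red=255)           # the background loop pushes LED state
#         print c.state_as_dict()        # latest polled buttons/sensors
#         c.terminate()
#
#     remote = get_remote_controller('http://localhost:5000/controller/0')
# ----------------------------------------------------------------------------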
|
mit
| -3,089,800,069,205,572,600
| 26.776632
| 108
| 0.550662
| false
| 3.766542
| false
| false
| false
|
thorwhalen/ut
|
parse/venere.py
|
1
|
4355
|
__author__ = 'thorwhalen'
import ut.parse.util as parse_util
import re
import ut.pstr.trans as pstr_trans
import ut.parse.bsoup as parse_bsoup
pois_near_hotel_exp_0 = re.compile("(?<=\. )[\w ]+(?=- 0\.\d km / 0\.\d mi)")
pois_near_hotel_exp = re.compile("(.+)- (\d+[\.\d]*) km / (\d+[\.\d]*) mi")
def get_pois_near_hotel_location(html):
html = parse_util.x_to_soup(html)
html = html.find('div', attrs={'id': "location-distances"}).renderContents()
t = html.split('<br/>')
t = ['. '+x for x in t]
# print len(t)
# return [re.search(pois_near_hotel_exp, x) for x in t]
return [x.group(0).strip() for x in
[re.search(pois_near_hotel_exp_0, x) for x in t]
if x]
def parse_hotel_info_page(html):
html = parse_util.x_to_soup(html)
d = dict()
# hotel name
d = parse_bsoup.add_text_to_parse_dict(soup=html, parse_dict=d,
key='hotel_name', name='h1', attrs={'property': 'v:name'}, text_transform=parse_util.strip_spaces)
# hotel address
tag = html.find(name='p', attrs={'id': 'property-address'})
if tag:
d['hotel_address'] = pstr_trans.strip(tag.text)
d = parse_bsoup.add_text_to_parse_dict(soup=tag, parse_dict=d,
key='hotel_street_address', name='span', attrs={'property': "v:street-address"},
text_transform=parse_util.strip_spaces)
d = parse_bsoup.add_text_to_parse_dict(soup=tag, parse_dict=d,
key='hotel_locality', name='span', attrs={'property': "v:locality"},
text_transform=parse_util.strip_spaces)
# average price
d = parse_bsoup.add_text_to_parse_dict(soup=html, parse_dict=d,
key='currency', name='span', attrs={'id': 'currency-symbol'}, text_transform=parse_util.strip_spaces)
avgPriceEl0 = html.find(name='span', attrs={'id': 'avgPriceEl0'})
avgPriceDecimals = html.find(name='sup', attrs={'id': 'avgPriceDecimals'})
if avgPriceEl0:
d['average_price'] = avgPriceEl0.text
if avgPriceDecimals:
d['average_price'] = d['average_price'] + avgPriceDecimals.text
d['average_price'] = float(d['average_price'])
# facebook likes
d = parse_bsoup.add_text_to_parse_dict(soup=html, parse_dict=d,
key='facebook_likes', name='span', attrs={'class': 'pluginCountTextDisconnected'}, text_transform=float)
# num_of_photos
tag = html.find(name='div', attrs={'id': 'photo_gallery'})
if tag:
d['num_of_photos'] = len(tag.findAll(name='li'))
# hotel description
d = parse_bsoup.add_text_to_parse_dict(soup=html, parse_dict=d,
key='hotel_description', name='div', attrs={'id': 'hotel-description-body'}, text_transform=parse_util.strip_spaces)
# average_venere_rating
tag = html.find(name='div', attrs={'id': 'avg_guest_rating'})
if tag:
d['average_venere_rating'] = float(tag.find(name='b', attrs={'property': 'v:rating'}).text)
# facilities
tag = html.find(name='div', attrs={'id': 'facilities'})
if tag:
facilities = tag.findAll(name='li')
if facilities:
d['facilities'] = [parse_util.strip_spaces(x.text) for x in facilities]
# alternate names
tag = html.find(name='div', attrs={'id': 'also_known_as'})
if tag:
tag = tag.find(name='p')
if tag:
t = [parse_util.strip_spaces(x) for x in tag.renderContents().split('<br>')]
t = [parse_util.strip_tags(x) for x in t]
d['alternate_names'] = t
# overview_reviews
tag = html.find(name='div', attrs={'id': 'reviews-overview-hbar-box'})
if tag:
tagg = tag.findAll(name='div', attrs={'class': 'reviews-overview-horizzontalbar'})
if tagg:
d['overview_reviews'] = dict()
for t in tagg:
d['overview_reviews'][t.find(name='p').text] = float(t.find(name='b').text)
# location_distances
tag = html.find(name='div', attrs={'id': 'location-distances'})
if tag:
t = re.sub("^[^<]+<h2>.+</h2>","", tag.renderContents()).split('<br/>')
tt = [re.findall(pois_near_hotel_exp, x) for x in t]
tt = [x[0] for x in tt if x]
d['poi_and_distances'] = [{'poi': parse_util.strip_spaces(x[0].replace('"', '')), 'km': float(x[1]), 'mi': float(x[2])} for x in tt]
return d
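# Editor's usage sketch (not part of the original module). The downloader and
# URL are hypothetical; both functions accept raw HTML or an already-built soup.
#
#     html = some_downloader('http://www.venere.com/.../some-hotel/')  # assumed helper
#     info = parse_hotel_info_page(html)          # dict: name, address, price, reviews...
#     pois = get_pois_near_hotel_location(html)   # nearby points of interest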
|
mit
| -6,565,722,863,570,062,000
| 39.700935
| 140
| 0.586912
| false
| 3.04972
| false
| false
| false
|
auto-mat/django-webmap-corpus
|
webmap/models.py
|
1
|
18853
|
# -*- coding: utf-8 -*-
from author.decorators import with_author
from colorful.fields import RGBColorField
from constance.admin import config
import django
from django import forms
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection
from django.core.cache import cache
from django.db.models.signals import m2m_changed, post_delete, post_save
from django.forms import ModelForm
from django.utils.translation import gettext_lazy as _
from django_gpxpy import gpx_parse
from easy_thumbnails.files import get_thumbnailer
import fgp
from . import admin_image_widget
from .utils import SlugifyFileSystemStorage
def get_default_status():
try:
return config.DEFAULT_STATUS_ID
except:
return 0
class Status(models.Model):
"Stavy zobrazeni konkretniho objektu, vrstvy apod. - aktivni, navrzeny, zruseny, ..."
name = models.CharField(unique=True, max_length=255, verbose_name=_(u"name"), help_text=_(u"Status name"))
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_(u"Status description."))
show = models.BooleanField(help_text=_(u"Show to map user"), default=False, verbose_name=_("show"))
show_to_mapper = models.BooleanField(help_text=_(u"Show to mapper"), default=False, verbose_name=_("show to mapper"))
class Meta:
verbose_name = _(u"status")
verbose_name_plural = _("statuses")
def __str__(self):
return self.name
class Layer(models.Model):
"Vrstvy, ktere se zobrazi v konkretni mape"
name = models.CharField(max_length=255, verbose_name=_(u"name"), help_text=_(u"Name of the layer"), default="")
slug = models.SlugField(unique=True, verbose_name=_(u"name in URL"))
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_("Layer description."))
status = models.ForeignKey(Status, verbose_name=_("status"), on_delete=models.PROTECT)
order = models.IntegerField(verbose_name=_("order"), default=0, blank=False, null=False)
remark = models.TextField(null=True, blank=True, help_text=_(u"Internal information about layer."), verbose_name=_("internal remark"))
enabled = models.BooleanField(verbose_name=_(u"Enabled by defalut"), help_text=_(u"True = the layer is enabled on map load"), default=True)
icon_height = models.IntegerField(default=20)
icon_width = models.IntegerField(default=20)
icon = models.ImageField(
null=True,
blank=True,
upload_to='layer_icons',
storage=SlugifyFileSystemStorage(),
verbose_name=_("layer icon"),
height_field='icon_height',
width_field='icon_width',
)
class Meta:
verbose_name = _(u"layer")
verbose_name_plural = _(u"layers")
ordering = ['order']
def __init__(self, *args, **kwargs):
try:
self._meta.get_field('status').default = get_default_status()
except django.db.utils.ProgrammingError:
pass
return super(Layer, self).__init__(*args, **kwargs)
def __str__(self):
return self.name
class OverlayLayer(Layer):
class Meta:
verbose_name = _(u"overlay layer")
verbose_name_plural = _(u"overlay layers")
class Marker(models.Model):
"Map markers with display style definition."
name = models.CharField(unique=True, max_length=255, verbose_name=_(u"name"), help_text=_("Name of the marker."))
slug = models.SlugField(unique=True, verbose_name=_(u"name in URL"), null=True)
# Relationships
layer = models.ForeignKey(Layer, verbose_name=_("layer"), on_delete=models.PROTECT)
status = models.ForeignKey(Status, verbose_name=_("status"), on_delete=models.PROTECT)
# content
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_(u"Detailed marker descrption."))
remark = models.TextField(null=True, blank=True, help_text=_(u"Internal information about layer."), verbose_name=_("internal remark"))
# Base icon and zoom dependent display range
default_icon_height = models.IntegerField(default=20)
default_icon_width = models.IntegerField(default=20)
default_icon = models.ImageField(
null=True,
blank=True,
upload_to='icons',
storage=SlugifyFileSystemStorage(),
verbose_name=_("default icon"),
height_field='default_icon_height',
width_field='default_icon_width',
)
menu_icon_height = models.IntegerField(default=20)
menu_icon_width = models.IntegerField(default=20)
menu_icon = models.ImageField(
null=True,
blank=True,
upload_to='icons/marker/menu',
storage=SlugifyFileSystemStorage(),
verbose_name=_("menu icon"),
height_field='menu_icon_height',
width_field='menu_icon_width',
)
minzoom = models.PositiveIntegerField(default=1, verbose_name=_("Minimal zoom"), help_text=_(u"Minimal zoom in which the POIs of this marker will be shown on the map."))
maxzoom = models.PositiveIntegerField(default=10, verbose_name=_("Maximal zoom"), help_text=_(u"Maximal zoom in which the POIs of this marker will be shown on the map."))
# Linear elements style
line_width = models.FloatField(verbose_name=_(u"line width"), default=2,)
line_color = RGBColorField(default="#ffc90e", verbose_name=_("line color"))
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("created at"))
last_modification = models.DateTimeField(auto_now=True, verbose_name=_("last modification at"))
order = models.IntegerField(verbose_name=_("order"), default=0, blank=False, null=False)
def line_color_kml(this):
color = this.line_color[1:]
return "88" + color[4:6] + color[2:4] + color[0:2]
def __init__(self, *args, **kwargs):
try:
self._meta.get_field('status').default = get_default_status()
except django.db.utils.ProgrammingError:
pass
return super(Marker, self).__init__(*args, **kwargs)
class Meta:
permissions = [
("can_only_view", "Can only view"),
]
verbose_name = _(u"marker")
verbose_name_plural = _(u"markers")
ordering = ['order', ]
def __str__(self):
return self.name
class VisibleManager(models.Manager):
"Manager that will return objects visible on the map"
def get_queryset(self):
return super(VisibleManager, self).get_queryset().filter(status__show=True, marker__status__show=True, marker__layer__status__show=True)
class Sector(models.Model):
"Map sector"
name = models.CharField(max_length=255, verbose_name=_(u"name"))
slug = models.SlugField(unique=True, verbose_name=_(u"name in URL"))
geom = models.PolygonField(verbose_name=_(u"area"), srid=4326, help_text=_(u"Sector area"))
objects = models.Manager()
class Meta:
verbose_name = _(u"sector")
verbose_name_plural = _(u"sectors")
def __str__(self):
return self.name
@with_author
@fgp.guard('importance', 'status', name='can_edit_advanced_fields')
class Poi(models.Model):
"Place in map"
name = models.CharField(max_length=255, verbose_name=_(u"name"), help_text=_(u"Exact place name"))
# Relationships
marker = models.ForeignKey(Marker, limit_choices_to={'status__show_to_mapper': 'True', 'layer__status__show_to_mapper': 'True'}, verbose_name=_(u"marker"), help_text=_("Select icon, that will be shown in map"), related_name="pois", on_delete=models.PROTECT)
status = models.ForeignKey(Status, default=0, help_text=_("POI status, determine if it will be shown in map"), verbose_name=_(u"status"), on_delete=models.SET_DEFAULT)
properties = models.ManyToManyField('Property', blank=True, help_text=_("POI properties"), verbose_name=_("properties"), limit_choices_to={'status__show_to_mapper': 'True'})
importance = models.SmallIntegerField(
default=0,
verbose_name=_(u"importance"),
help_text=_(u"""Minimal zoom modificator (use 20+ to show always).<br/>"""),
)
# Geographical intepretation
geom = models.GeometryCollectionField(
verbose_name=_(u"place geometry"),
default=None,
srid=4326,
help_text=_(u"""Add point: Select pencil with plus sign icon and place your point to the map.<br/>
Add line: Select line icon and by clicking to map draw the line. Finish drawing with double click.<br/>
Add area: Select area icon and by clicking to mapy draw the area. Finish drawing with double click.<br/>
Object edition: Select the first icon and then select object in map. Draw points in map to move them, use points in the middle of sections to add new edges."""),
)
objects = models.Manager()
# Own content (facultative)
desc = models.TextField(null=True, blank=True, verbose_name=_(u"description"), help_text=_(u"Text that will be shown after selecting POI."))
desc_extra = models.TextField(null=True, blank=True, verbose_name=_(u"detailed description"), help_text=_("Text that extends the description."))
url = models.URLField(null=True, blank=True, verbose_name=_("URL"), help_text=_(u"Link to the web page of the place."))
address = models.CharField(max_length=255, null=True, blank=True, verbose_name=_(u"adress"), help_text=_(u"Poi address (street, house number)"))
remark = models.TextField(null=True, blank=True, verbose_name=_(u"Internal remark"), help_text=_(u"Internal information about POI."))
    # The slugs of all properties are stored here so that rendering the KML
    # does not require a DB query for each Poi's properties.
properties_cache = models.CharField(max_length=255, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("created at"))
last_modification = models.DateTimeField(auto_now=True, verbose_name=_("last modification at"))
visible = VisibleManager()
class Meta:
permissions = [
("can_only_own_data_only", "Can only edit his own data"),
]
verbose_name = _("place")
verbose_name_plural = _("places")
def __str__(self):
if self.name:
return self.name
return str(self.marker)
def save_properties_cache(self):
self.properties_cache = u",".join([v.slug for v in self.properties.filter(status__show=True)])
self.save()
def get_absolute_url(self):
return "/misto/%i/" % self.id
def properties_list(self):
return u", ".join([p.name for p in self.properties.all()])
def __init__(self, *args, **kwargs):
try:
self._meta.get_field('status').default = get_default_status()
except django.db.utils.ProgrammingError:
pass
return super(Poi, self).__init__(*args, **kwargs)
def update_properties_cache(sender, instance, action, reverse, model, pk_set, **kwargs):
"Property cache actualization at POI save. It will not work yet after property removal."
if action == 'post_add':
instance.save_properties_cache()
m2m_changed.connect(update_properties_cache, Poi.properties.through)
class GpxPoiForm(ModelForm):
gpx_file = forms.FileField(required=False, help_text=_(u"Upload geometry by GPX file"))
class Meta:
model = Marker
exclude = ('geom',)
def clean(self):
cleaned_data = super(GpxPoiForm, self).clean()
if 'gpx_file' in self.cleaned_data:
gpx_file = self.cleaned_data['gpx_file']
if gpx_file:
cleaned_data['geom'] = GeometryCollection(gpx_parse.parse_gpx_filefield(gpx_file))
class Legend(models.Model):
"map legend items of underlay"
name = models.CharField(unique=True, max_length=255, verbose_name=_(u"name"))
en_name = models.CharField(unique=True, max_length=255, null=True, verbose_name=_(u"English name"))
slug = models.SlugField(unique=True, verbose_name=_(u"name in URL"))
desc = models.TextField(null=True, blank=True, verbose_name=_(u"description"))
image = models.ImageField(upload_to='ikony', storage=SlugifyFileSystemStorage(), verbose_name=_(u"image"))
class Meta:
verbose_name = _(u"legend item")
verbose_name_plural = _(u"legend items")
def __str__(self):
return self.name
def image_tag(self):
return admin_image_widget.list_display(self.image)
image_tag.allow_tags = True
image_tag.short_description = _(u"image")
class LegendAdminForm(ModelForm):
class Meta:
model = Legend
exclude = {}
widgets = {
'image': admin_image_widget.AdminImageWidget,
}
def invalidate_cache(sender, instance, **kwargs):
if sender in [Status, Layer, Marker, Poi, Property, Legend, Sector]:
cache.clear()
post_save.connect(invalidate_cache)
post_delete.connect(invalidate_cache)
class Property(models.Model):
"Place properties"
name = models.CharField(max_length=255, verbose_name=_(u"name"), help_text=_(u"Status name"))
status = models.ForeignKey(Status, verbose_name=_("status"), on_delete=models.PROTECT)
as_filter = models.BooleanField(verbose_name=_("as filter?"), default=False, help_text=_(u"Show as a filter in right map menu?"))
order = models.IntegerField(verbose_name=_("order"), default=0, blank=False, null=False)
# content
slug = models.SlugField(unique=True, verbose_name=_("Name in URL"))
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_(u"Property description."))
remark = models.TextField(null=True, blank=True, verbose_name=_(u"Internal remark"), help_text=_(u"Internal information about the property."))
default_icon_height = models.IntegerField(default=20)
default_icon_width = models.IntegerField(default=20)
default_icon = models.ImageField(
null=True,
blank=True,
upload_to='icons',
storage=SlugifyFileSystemStorage(),
verbose_name=_("default icon"),
height_field='default_icon_height',
width_field='default_icon_width',
)
class Meta:
verbose_name = _(u"property")
verbose_name_plural = _(u"properties")
ordering = ['order']
def __str__(self):
return self.name
def icon_tag(self):
return admin_image_widget.list_display(self.default_icon)
icon_tag.allow_tags = True
icon_tag.short_description = _(u"icon")
def __init__(self, *args, **kwargs):
try:
self._meta.get_field('status').default = get_default_status()
except django.db.utils.ProgrammingError:
pass
return super(Property, self).__init__(*args, **kwargs)
class License(models.Model):
name = models.CharField(max_length=255, verbose_name=_(u"name"), help_text=_(u"License name"))
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_(u"License description."))
class Meta:
verbose_name = _(u"license")
verbose_name_plural = _(u"licenses")
def __str__(self):
return self.name
class BaseLayer(Layer):
url = models.URLField(null=True, blank=True, verbose_name=_("URL"), help_text=_(u"Base layer tiles url. e.g. "))
class Meta:
verbose_name = _(u"base layer")
verbose_name_plural = _(u"base layers")
def __str__(self):
return self.name
class MapPreset(models.Model):
class Meta:
verbose_name = _(u"map preset")
verbose_name_plural = _(u"map presets")
ordering = ['order', ]
name = models.CharField(max_length=255, verbose_name=_(u"name"), help_text=_(u"Name of preset"))
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_(u"Map preset description."))
status = models.ForeignKey(Status, verbose_name=_("status"), default=None, null=True, on_delete=models.SET_NULL)
base_layer = models.ForeignKey(BaseLayer, verbose_name=_("base layer"), on_delete=models.PROTECT)
overlay_layers = models.ManyToManyField(OverlayLayer, blank=True, verbose_name=_("overlay layers"), limit_choices_to={'status__show_to_mapper': 'True'})
order = models.IntegerField(verbose_name=_("order"), default=0, blank=False, null=False)
icon = models.ImageField(
null=False,
blank=False,
upload_to='preset_icons',
storage=SlugifyFileSystemStorage(),
verbose_name=_(u"preset icon"),
)
def overlay_layers_slugs(self):
return [l.slug for l in self.overlay_layers.all()]
@with_author
class Photo(models.Model):
poi = models.ForeignKey(Poi, related_name="photos", verbose_name=_("poi"), on_delete=models.PROTECT)
name = models.CharField(max_length=255, verbose_name=_(u"name"), help_text=_(u"Photo name"), blank=True)
desc = models.TextField(null=True, blank=True, verbose_name=_("description"), help_text=_(u"Photo description."))
license = models.ForeignKey(License, verbose_name=_("license"), on_delete=models.PROTECT)
order = models.IntegerField(verbose_name=_("order"), default=0, blank=False, null=False)
photographer = models.CharField(max_length=255, verbose_name=_(u"Photography author"), blank=True, help_text=_(u"Full name of the author of the photography"))
status = models.ForeignKey(Status, default=None, help_text=_("Status, determine if the photo will be shown in map"), blank=False, null=True, verbose_name=_(u"status"), on_delete=models.SET_DEFAULT)
photo = models.ImageField(
null=False,
blank=False,
upload_to='photo',
storage=SlugifyFileSystemStorage(),
verbose_name=_(u"photo"),
help_text=_(u"Upload photo in full resolution."),
)
created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name=_("created at"))
last_modification = models.DateTimeField(auto_now=True, null=True, blank=True, verbose_name=_("last modification at"))
def thumb_url(self):
return get_thumbnailer(self.photo)['photo_thumb'].url
# if we want to filter photos by poi position
objects = models.Manager()
def image_tag(self):
return admin_image_widget.list_display(self.photo)
image_tag.short_description = _(u"image")
image_tag.allow_tags = True
def __str__(self):
if self.name:
return self.name
return self.poi.name
class Meta:
permissions = [
("can_view_photo_list", "Can view photo list"),
]
verbose_name = _(u"photo")
verbose_name_plural = _(u"photographies")
ordering = ['order', ]
class PhotoAdminForm(ModelForm):
class Meta:
model = Photo
exclude = {}
widgets = {
'photo': admin_image_widget.AdminImageWidget,
}
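# -----------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It shows the intent
# of the custom manager and the property cache; names are illustrative only.
#
#     visible_pois = Poi.visible.all()      # only POIs whose own status, marker
#                                           # and layer are all set to show
#     poi = Poi.objects.first()
#     poi.save_properties_cache()           # refresh the slug cache used when
#                                           # rendering KML
# -----------------------------------------------------------------------------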
|
mit
| -2,574,332,993,582,529,000
| 39.631466
| 261
| 0.657667
| false
| 3.734007
| false
| false
| false
|
pitunti/alfaPitunti
|
plugin.video.alfa/channels/hentaienespanol.py
|
1
|
1882
|
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = 'http://www.xn--hentaienespaol-1nb.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart=''))
itemlist.append(
Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'hentai/sin-censura/', thumbnail='',
fanart=''))
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="box-peli" id="post-.*?">.<h2 class="title">.<a href="([^"]+)">([^<]+)<\/a>.*?'
patron += 'height="170px" src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle # .decode('utf-8')
thumbnail = scrapedthumbnail
fanart = ''
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
# Paginacion
title = ''
siguiente = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="([^"]+)">')
title = 'Pagina Siguiente >>> '
fanart = ''
itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return todas(item)
else:
return []
|
gpl-3.0
| -2,298,668,226,065,378,000
| 28.873016
| 119
| 0.623804
| false
| 3.498141
| false
| false
| false
|
EIREXE/SpaceDock
|
SpaceDock/ckan.py
|
2
|
2394
|
from SpaceDock.config import _cfg
from github import Github
from flask import url_for
import subprocess
import json
import os
import re
# TODO(Thomas): Make this modular
def send_to_ckan(mod):
if not _cfg("netkan_repo_path"):
return
if not mod.ckan:
return
json_blob = {
'spec_version': 'v1.4',
'identifier': re.sub(r'\W+', '', mod.name),
'$kref': '#/ckan/spacedock/' + str(mod.id),
'license': mod.license,
'x_via': 'Automated ' + _cfg('site-name') + ' CKAN submission'
}
wd = _cfg("netkan_repo_path")
path = os.path.join(wd, 'NetKAN', json_blob['identifier'] + '.netkan')
if os.path.exists(path):
# If the file is already there, then chances are this mod has already been indexed
return
with open(path, 'w') as f:
f.write(json.dumps(json_blob, indent=4))
subprocess.call(['git', 'fetch', 'upstream'], cwd=wd)
subprocess.call(['git', 'checkout', '-b', 'add-' + json_blob['identifier'], 'upstream/master'], cwd=wd)
subprocess.call(['git', 'add', '-A'], cwd=wd)
subprocess.call(['git', 'commit', '-m', 'Add {0} from '.format(mod.name) + _cfg('site-name') + '\n\nThis is an automated commit on behalf of {1}'\
.format(mod.name, mod.user.username), '--author={0} <{1}>'.format(mod.user.username, mod.user.email)], cwd=wd)
subprocess.call(['git', 'push', '-u', 'origin', 'add-' + json_blob['identifier']], cwd=wd)
g = Github(_cfg('github_user'), _cfg('github_pass'))
r = g.get_repo("KSP-CKAN/NetKAN")
r.create_pull(title="Add {0} from ".format(mod.name) + _cfg('site-name'), base=r.default_branch, head=_cfg('github_user') + ":add-" + json_blob['identifier'], body=\
"""\
This pull request was automatically generated by """ + _cfg('site-name') + """ on behalf of {0}, to add [{1}]({4}{2}) to CKAN.
Mod details:
name = {1}
author = {0}
description = {5}
abstract = {6}
license = {7}
Homepage = {8}
Please direct questions about this pull request to [{0}]({4}{3}).
""".format(mod.user.username, mod.name,\
url_for('mods.mod', mod_name=mod.name, id=mod.id),\
url_for("profile.view_profile", username=mod.user.username),\
_cfg("protocol") + "://" + _cfg("domain"),\
mod.description, mod.short_description,\
mod.license, mod.external_link))
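# Editor's note (not part of the original module): for a mod named "Example Mod"
# with id 42, the .netkan blob written above would look roughly like
#
#     {
#         "spec_version": "v1.4",
#         "identifier": "ExampleMod",
#         "$kref": "#/ckan/spacedock/42",
#         "license": "<mod.license>",
#         "x_via": "Automated <site-name> CKAN submission"
#     }
#
# before the branch is pushed and the pull request against KSP-CKAN/NetKAN is
# opened.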
|
mit
| -2,606,136,009,649,760,000
| 40
| 169
| 0.587719
| false
| 3.121252
| false
| false
| false
|
clchiou/garage
|
py/garage/garage/multiprocessing/backport.py
|
1
|
1205
|
__all__ = [
'BoundedSemaphore',
'UnlimitedSemaphore',
'Timeout',
]
import threading
import time
# NOTE: This module is Python 2 compatible.
class Timeout(Exception):
pass
# Because Python 2 semaphore does not support timeout...
class BoundedSemaphore(object):
def __init__(self, value):
if value < 0:
raise ValueError('semaphore initial value must be >= 0')
self._cond = threading.Condition(threading.Lock())
self._initial_value = value
self._value = value
def acquire(self, timeout):
with self._cond:
endtime = time.time() + timeout
while self._value == 0:
timeout = endtime - time.time()
if timeout <= 0:
raise Timeout
self._cond.wait(timeout)
self._value -= 1
def release(self):
with self._cond:
if self._value >= self._initial_value:
raise ValueError('semaphore is released too many times')
self._value += 1
self._cond.notify()
class UnlimitedSemaphore(object):
def acquire(self, timeout):
pass
def release(self):
pass
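# Editor's demo (not part of the original module); it runs only when the file
# is executed directly and works under both Python 2 and 3.
if __name__ == '__main__':
    sem = BoundedSemaphore(1)
    sem.acquire(timeout=0.1)         # succeeds immediately
    try:
        sem.acquire(timeout=0.1)     # no permit left -> raises Timeout
    except Timeout:
        print('timed out as expected')
    sem.release()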
|
mit
| 3,029,463,247,928,132,600
| 22.173077
| 72
| 0.561826
| false
| 4.479554
| false
| false
| false
|
garvenshen/swquota
|
tests/test_swquota.py
|
1
|
6671
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
try:
from swift.common.swob import Request
except ImportError:
from webob.exc import Request
from swquota.middleware import Swquota
class FakeCache(object):
def __init__(self, val):
self.val = val
def get(self, *args):
return self.val
def set(self, *args, **kwargs):
pass
class FakeApp(object):
def __init__(self, headers=[]):
self.headers = headers
def __call__(self, env, start_response):
start_response('200 OK', self.headers)
return []
def start_response(*args):
pass
class TestAccountQuota(unittest.TestCase):
def test_unauthorized(self):
headers = [('x-account-bytes-used', 1000), ]
app = Swquota(FakeApp(headers), {})
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
#Response code of 200 because authentication itself is not done here
self.assertEquals(res.status_int, 200)
def test_no_quotas(self):
headers = [('x-account-bytes-used', 1000), ]
app = Swquota(FakeApp(headers), {})
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'REMOTE_USER': 'a'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_exceed_bytes_quota(self):
headers = [('x-account-bytes-used', 1000),
('x-account-meta-bytes-limit', 0)]
app = Swquota(FakeApp(headers), {})
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'REMOTE_USER': 'a'})
res = req.get_response(app)
self.assertEquals(res.status_int, 413)
def test_exceed_bytes_quota_reseller(self):
headers = [('x-account-bytes-used', 1000),
('x-account-meta-bytes-limit', 0)]
app = Swquota(FakeApp(headers), {})
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'REMOTE_USER': 'a.,.reseller_admin'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_exceed_bytes_quota_reseller_keystone(self):
headers = [('x-account-bytes-used', 1000),
('x-account-meta-bytes-limit', 0)]
app = Swquota(FakeApp(headers), {})
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'HTTP_X_ROLES': 'a,reseller'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_not_exceed_bytes_quota(self):
headers = [('x-account-bytes-used', 1000),
('x-account-meta-bytes-limit', 2000)]
app = Swquota(FakeApp(headers), {})
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'REMOTE_USER': 'a'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_invalid_quotas(self):
headers = [('x-account-bytes-used', 0), ]
app = Swquota(FakeApp(headers), {})
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_BYTES_LIMIT': 'abc',
'REMOTE_USER': 'a,.reseller_admin'})
res = req.get_response(app, {})
self.assertEquals(res.status_int, 400)
def test_valid_quotas_admin(self):
headers = [('x-account-bytes-used', 0), ]
app = Swquota(FakeApp(headers), {})
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_BYTES_LIMIT': '100',
'REMOTE_USER': 'a'})
res = req.get_response(app, {})
self.assertEquals(res.status_int, 403)
def test_valid_quotas_reseller(self):
headers = [('x-account-bytes-used', 0), ]
app = Swquota(FakeApp(headers), {})
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_BYTES_LIMIT': 100,
'REMOTE_USER': 'a.,.reseller_admin'})
res = req.get_response(app, {})
self.assertEquals(res.status_int, 200)
def test_delete_quotas(self):
headers = [('x-account-bytes-used', 0), ]
app = Swquota(FakeApp(headers), {})
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_BYTES_LIMIT': None,
'REMOTE_USER': 'a'})
res = req.get_response(app, {})
self.assertEquals(res.status_int, 403)
def test_delete_quotas_reseller(self):
headers = [('x-account-bytes-used', 0), ]
app = Swquota(FakeApp(headers), {})
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_BYTES_LIMIT': None,
'REMOTE_USER': 'a.,.reseller_admin'})
res = req.get_response(app, {})
self.assertEquals(res.status_int, 200)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 6,304,735,773,008,333,000
| 37.33908
| 78
| 0.510418
| false
| 4.013839
| true
| false
| false
|
pablocarderam/genetargeter
|
gRNAScores/azimuth/metrics.py
|
1
|
20467
|
"""
from https://gist.github.com/bwhite/3726239
Information Retrieval metrics
Useful Resources:
http://www.cs.utexas.edu/~mooney/ir-course/slides/Evaluation.ppt
http://www.nii.ac.jp/TechReports/05-014E.pdf
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
http://hal.archives-ouvertes.fr/docs/00/72/67/60/PDF/07-busa-fekete.pdf
Learning to Rank for Information Retrieval (Tie-Yan Liu)
"""
from time import time
import numpy as np
from scipy.stats.mstats import rankdata
from .elevation.metrics import spearman_weighted_swap_perm_test
def mean_reciprocal_rank(relevance_scores: list) -> np.ndarray:
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
    >>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.61111111111111105
    >>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    >>> mean_reciprocal_rank(rs)
    0.5
    >>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.75
Args:
relevance_scores: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
relevance_scores = (np.asarray(r).nonzero()[0] for r in relevance_scores)
return np.mean([1.0 / (r[0] + 1) if r.size else 0.0 for r in relevance_scores])
def r_precision(relevance: list) -> np.ndarray:
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
    >>> r = [0, 0, 1]
    >>> r_precision(r)
    0.33333333333333331
    >>> r = [0, 1, 0]
    >>> r_precision(r)
    0.5
    >>> r = [1, 0, 0]
    >>> r_precision(r)
    1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
relevance = np.asarray(relevance) != 0
z = relevance.nonzero()[0]
if not z.size:
return 0.0
return np.mean(relevance[: z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
    >>> r = [0, 0, 1]
    >>> precision_at_k(r, 1)
    0.0
    >>> precision_at_k(r, 2)
    0.0
    >>> precision_at_k(r, 3)
    0.33333333333333331
    >>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
:param k:
"""
if k < 1:
raise AssertionError()
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError("Relevance score length < k")
return np.mean(r)
def average_precision(r):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
    >>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
    >>> delta_r = 1. / sum(r)
    >>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])
    0.7833333333333333
    >>> average_precision(r)
0.78333333333333333
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.0
return np.mean(out)
def mean_average_precision(rs):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
>>> mean_average_precision(rs)
0.78333333333333333
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]
>>> mean_average_precision(rs)
0.39166666666666666
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return np.mean([average_precision(r) for r in rs])
def dcg_at_k(r, k, method=0):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
    >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
    >>> dcg_at_k(r, 1)
    3.0
    >>> dcg_at_k(r, 1, method=1)
    3.0
    >>> dcg_at_k(r, 2)
    5.0
    >>> dcg_at_k(r, 2, method=1)
    4.2618595071429155
    >>> dcg_at_k(r, 10)
    9.6051177391888114
    >>> dcg_at_k(r, 11)
9.6051177391888114
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError("method must be 0 or 1.")
return 0.0
def ndcg_at_k(r, k, method=0):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> ndcg_at_k(r, 1)
1.0
>>> r = [2, 1, 2, 0]
>>> ndcg_at_k(r, 4)
0.9203032077642922
>>> ndcg_at_k(r, 4, method=1)
0.96519546960144276
>>> ndcg_at_k([0], 1)
0.0
>>> ndcg_at_k([1], 2)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.0
return dcg_at_k(r, k, method) / dcg_max
# ------------------------------------------------------------------------------------
# custom additions to avoid problems with ties
def ndcg_at_k_ties(
labels: list,
predictions: list,
k: int,
method: int = 0,
normalize_from_below_too: bool = False,
theta=None,
) -> float:
"""
See 2008 McSherry et al on how to efficiently compute NDCG with ties
labels are ground truth
if k=None then k gets set to len(labels)
labels and predictions get flattened here
set normalize_from_below_too=False for conventional
ndcg_at_k_ties, but note this will only
ensure the max is 1, not that the min is zero.
to get that added guarantee, set this argument to True
"""
if isinstance(labels, list):
labels = np.array(labels)
if isinstance(predictions, list):
predictions = np.array(predictions)
if len(labels.shape) != 1 and np.min(labels.shape) != 1:
raise AssertionError("should be 1D array or equivalent")
if len(predictions.shape) != 1 and np.min(predictions.shape) != 1:
raise AssertionError("should be 1D array or equivalent")
labels = labels.flatten()
predictions = predictions.flatten()
if np.any(labels.shape != predictions.shape):
raise AssertionError("labels and predictions should have the same shape")
if k is None:
k = len(labels)
labels = labels.copy()
dcg = dcg_at_k_ties(labels, predictions, k, method=method, theta=theta)
dcg_max = dcg_at_k_ties(labels, labels, k, method, theta=theta)
# NOTE: I have checked that dcg_at_k_ties and dcg_at_k match when there are no ties,
# or ties in the labels
if normalize_from_below_too:
dcg_min = dcg_at_k_ties(
np.sort(labels)[::-1], np.sort(predictions), k, method, theta=theta
)
else:
dcg_min = 0
numerator = dcg - dcg_min
if numerator <= -1e-5:
raise AssertionError()
numerator = np.max((0, numerator))
ndcg = numerator / (dcg_max - dcg_min)
if not 1.0 >= ndcg >= 0.0:
raise AssertionError(f"ndcg={ndcg} should be in [0,1]")
if not dcg_max:
ndcg = 0.0
return ndcg
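# Illustrative sketch (not from the original module): one way ndcg_at_k_ties might be
# called; the helper name and the label/prediction values are invented for demonstration.
def _example_ndcg_at_k_ties():
    labels = [3.0, 2.0, 1.0, 0.0]
    predictions = [0.9, 0.9, 0.2, 0.1]  # note the tie between the first two scores
    # method=0 uses the log2 discount; normalize_from_below_too=True rescales to [0, 1]
    return ndcg_at_k_ties(labels, predictions, k=4, method=0,
                          normalize_from_below_too=True)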
def dcg_helper(discount_factors, gain, k, labels, method, predictions):
# step through, in current order (of decreasing predictions), accumulating tied gains
# (which may be singletons)
ii = 0
dcg = 0.0
while ii < k:
current_pred = predictions[ii]
current_gain = gain(labels[ii], method)
# intializing the tied cumulative variables
cum_tied_gain = current_gain
cum_tied_disc = discount_factors[ii]
num_ties = 1
ii += 1
# count number of ties in predictions
while ii < len(predictions) and predictions[ii] == current_pred: # while tied
num_ties += 1.0
cum_tied_gain += gain(labels[ii], method)
if ii < k:
cum_tied_disc += discount_factors[ii]
ii += 1
avg_gain = cum_tied_gain / num_ties
dcg += avg_gain * cum_tied_disc
if np.isnan(dcg):
raise AssertionError("found nan dcg")
return dcg
def dcg_at_k_ties(labels, predictions, k, method=0, theta=None):
"""
See 2008 McSherry et al on how to efficiently compute NDCG (method=0 here) with ties
(in the predictions)
'labels' are what the "ground truth" judges assign
'predictions' are the algorithm predictions corresponding to each label
Also, http://en.wikipedia.org/wiki/Discounted_cumulative_gain for basic defns
"""
if not isinstance(predictions, np.ndarray):
raise AssertionError()
if len(labels) != len(predictions):
raise AssertionError("labels and predictions should be of same length")
if k > len(labels):
raise AssertionError("k should be <= len(labels)")
# order both labels and preds so that they are in order of decreasing predictive score
sorted_ind = np.argsort(predictions)[::-1]
predictions = predictions[sorted_ind]
labels = labels[sorted_ind]
def gain(label, method):
if method == 0:
return label
elif method == 1:
return 2 ** label - 1.0
elif method == 2 or method == 3 or method == 4:
return label
else:
raise NotImplementedError()
if method == 0:
discount_factors = get_discount_factors(len(labels), discount="log2")
elif method == 1:
raise Exception("need to implement: log_2(i+1)")
elif method == 2:
discount_factors = get_discount_factors(len(labels), discount="linear")
elif method == 3:
discount_factors = get_discount_factors(len(labels), discount="combination")
elif method == 4:
if theta is None:
raise AssertionError("need to specify theta or theta")
discount_factors = get_discount_factors(
len(labels), discount="1/rtheta", theta=theta
)
else:
raise NotImplementedError()
if len(discount_factors) != len(labels):
raise AssertionError("discount factors has wrong length")
dcg = dcg_helper(discount_factors, gain, k, labels, method, predictions)
if np.isnan(dcg):
raise AssertionError("found nan dcg")
return dcg
def get_discount_factors(num_labels, discount="log2", theta=None):
ii_range = np.arange(num_labels) + 1
if discount == "log2":
discount_factors = np.concatenate(
(np.array([1.0]), 1.0 / np.log2(ii_range[1:]))
)
elif discount == "linear":
discount_factors = -ii_range / float(num_labels) + 1.0
elif discount == "combination":
l2 = np.concatenate((np.array([1.0]), 1.0 / np.log2(ii_range[1:])))
linear = -ii_range / float(num_labels) + 1.0
discount_factors = np.max((l2, linear), axis=0)
elif discount == "1/rtheta":
discount_factors = 1.0 / (ii_range ** theta)
else:
raise NotImplementedError
return discount_factors
def rank_data(r, rground):
# we checked this heavily, and is correct, e.g. rground will go from largest rank to smallest
r = rankdata(r)
rground = rankdata(rground)
if np.sum(r) != np.sum(rground):
raise AssertionError("ranks should add up to the same")
return r, rground
def dcg_alt(relevances, rank=20):
relevances = np.asarray(relevances)[:rank]
n_relevances = len(relevances)
if n_relevances == 0:
return 0.0
discounts = np.log2(np.arange(n_relevances) + 2)
return np.sum(relevances / discounts)
def ndcg_alt(relevances, rank=20):
best_dcg = dcg_alt(sorted(relevances, reverse=True), rank)
if best_dcg == 0:
return 0.0
return dcg_alt(relevances, rank) / best_dcg
def ndcg_at_k_swap_perm_test(
preds1, preds2, true_labels, nperm, method, k, normalize_from_below_too, theta=None
):
# pVal is the probability that we would observe as big an AUC diff as we
# did if the ROC curves were drawn from the null hypothesis (which is that
# one model does not perform better than the other)
#
# null hypothesis is that the prediction ranking are the same, so we exchange a random
# number of them with each other.
#
# see ndcg_at_k_ties for all but the first four parameters
#
# balance_zeros = True means that when we swap a zero for a non-zero value, we will also do
# a reverse swap
#
# this is a two-sided test, but since it is a symmetric null distribution, one should
# be able to divide the p-value by 2 to get the one-sided version (but think this through
# before using)
if isinstance(preds1, list):
preds1 = np.array(preds1)
else:
preds1 = preds1.flatten()
if isinstance(preds2, list):
preds2 = np.array(preds2)
else:
preds2 = preds2.flatten()
if isinstance(true_labels, list):
true_labels = np.array(true_labels)
else:
true_labels = true_labels.flatten()
if len(preds1) != len(preds2):
raise AssertionError("need same number of preditions from each model")
if len(preds1) != len(true_labels):
raise AssertionError("need same number of preditions in truth and predictions")
N = len(preds1)
# re-sort all by truth ordering so that when swap they are aligned
sorted_ind = np.argsort(true_labels)[::-1]
true_labels = true_labels[sorted_ind]
preds1 = preds1[sorted_ind]
preds2 = preds2[sorted_ind]
ranks1 = rankdata(preds1)
ranks2 = rankdata(preds2)
ndcg1 = ndcg_at_k_ties(
true_labels,
ranks1,
k=k,
method=method,
normalize_from_below_too=normalize_from_below_too,
theta=theta,
)
ndcg2 = ndcg_at_k_ties(
true_labels,
ranks2,
k=k,
method=method,
normalize_from_below_too=normalize_from_below_too,
theta=theta,
)
real_ndcg_diff = np.abs(ndcg1 - ndcg2)
perm_ndcg_diff = np.nan * np.zeros(nperm)
if np.all(preds1 == preds2):
pval = 1.0
else:
zero_ind = true_labels == 0
if np.sum(zero_ind) >= len(zero_ind):
raise AssertionError("balancing assumes there are more zeros than ones")
for _ in range(nperm):
pair_ind_to_swap = np.random.rand(N) < 0.5
ranks1_perm = ranks1.copy()
ranks1_perm[pair_ind_to_swap] = ranks2[pair_ind_to_swap]
ranks2_perm = ranks2.copy()
ranks2_perm[pair_ind_to_swap] = ranks1[pair_ind_to_swap]
ndcg1_perm = ndcg_at_k_ties(
true_labels,
ranks1_perm,
k=k,
method=method,
normalize_from_below_too=normalize_from_below_too,
theta=theta,
)
ndcg2_perm = ndcg_at_k_ties(
true_labels,
ranks2_perm,
k=k,
method=method,
normalize_from_below_too=normalize_from_below_too,
theta=theta,
)
            perm_ndcg_diff[_] = np.abs(ndcg1_perm - ndcg2_perm)
num_stat_greater = np.max((((perm_ndcg_diff > real_ndcg_diff).sum() + 1), 1.0))
pval = num_stat_greater / nperm
return pval, real_ndcg_diff, perm_ndcg_diff, ndcg1, ndcg2
if __name__ == "__main__":
simulated_data = True
permute_real_data = True
T = 1000
nperm = 100
weights = np.array([0.001])
theta_range = weights # just to make life easier
# only for simulated data
N = 100
frac_zeros = 0
k = None
allp = np.nan * np.zeros((len(theta_range) + 1, T))
if not simulated_data:
# print(
# "loading up saved data..."
# ) # two-fold CV data from CRISPR off-target GUIDE-SEQ
# with open(r"\\nerds5\kevin\from_nicolo\gs.pickle", "rb") as f:
# predictions, truth_all = pickle.load(f)
# print("done.")
# N = len(truth_all[0])
pass # that gs.pickle file was not in the source repo
for t in range(T):
# totally simulated
if simulated_data:
truth = np.random.rand(N)
zero_ind = np.random.rand(N) < frac_zeros
truth[zero_ind] = 0
pred1 = np.random.rand(N)
pred2 = np.random.rand(N)
# this all refers to stuff from that unavailable gs.pickle from above
# else:
# fold = 0
# truth = truth_all[fold]
# pred1 = predictions["CFD"][fold]
# pred2 = predictions["product"][fold]
# if permute_real_data:
# truth = np.random.permutation(truth)
t0 = time()
for i, w in enumerate(weights):
weights_array = truth.copy()
weights_array += w
pvaltmp, real_corr_diff, perm_corr_diff, corr1, corr2 = spearman_weighted_swap_perm_test(
pred1, pred2, truth, nperm, weights_array
)
allp[i, t] = pvaltmp
t1 = time()
truth = np.array([3, 4, 2, 1, 0, 0, 0])
pred1 = np.array([3, 4, 2, 1, 0, 0, 0])
pred2 = np.array([2, 1, 3, 4, 5, 6, 7])
truth3 = np.array([3, 4, 2, 1, 0, 0, 0])
truth4 = np.zeros(7)
truth4[0] = 1
pred3 = np.array([2, 1, 3, 4, 5, 6, 7]) * 10
pred4 = np.array([4, 3, 2, 1, 0, 0, 0])
pred5 = np.array([4, 3, 1, 2, 0, 0, 0])
nperm = 1000
method = 4
theta = 0.5
normalize_from_below_too = True
k = len(pred3)
pval, real_ndcg_diff, perm_ndcg_diff, ndcg1, ndcg2 = ndcg_at_k_swap_perm_test(
pred1, pred2, truth, nperm, method, k, normalize_from_below_too, theta=theta
)
print(f"ndcg1={ndcg1}, ndcg2={ndcg2}, ndcg_diff={real_ndcg_diff}, p={pval}")
pval, real_ndcg_diff, perm_ndcg_diff, ndcg1, ndcg2 = ndcg_at_k_swap_perm_test(
pred1, pred1, truth, nperm, method, k, normalize_from_below_too, theta=theta
)
print(f"ndcg1={ndcg1}, ndcg2={ndcg2}, ndcg_diff={real_ndcg_diff}, p={pval}")
pval, real_ndcg_diff, perm_ndcg_diff, ndcg1, ndcg2 = ndcg_at_k_swap_perm_test(
pred1, pred4, truth, nperm, method, k, normalize_from_below_too, theta=theta
)
print(f"ndcg1={ndcg1}, ndcg2={ndcg2}, ndcg_diff={real_ndcg_diff}, p={pval}")
pval, real_ndcg_diff, perm_ndcg_diff, ndcg1, ndcg2 = ndcg_at_k_swap_perm_test(
pred1, pred5, truth, nperm, method, k, normalize_from_below_too, theta=theta
)
print(f"ndcg1={ndcg1}, ndcg2={ndcg2}, ndcg_diff={real_ndcg_diff}, p={pval}")
print(ndcg_at_k_ties(truth4, pred2, k, method=3, normalize_from_below_too=True))
print(ndcg_alt(truth[np.argsort(pred2)[::-1]], 5))
print(ndcg_at_k(truth[np.argsort(pred2)[::-1]], 5, method=1))
print(ndcg_at_k(truth[np.argsort(pred2)[::-1]], 5, method=0))
print(ndcg_at_k_ties(truth, pred2, 5, method=1))
print(ndcg_at_k_ties(truth, pred2, 5, method=0))
|
mit
| 6,929,932,319,144,185,000
| 30.010606
| 101
| 0.591391
| false
| 3.211013
| false
| false
| false
|
TheVirtualLtd/bda.plone.orders
|
src/bda/plone/orders/upgrades.py
|
1
|
14529
|
# -*- coding: utf-8 -*-
from bda.plone.cart import get_object_by_uid
from bda.plone.orders import message_factory as _
from bda.plone.orders.common import acquire_vendor_or_shop_root
from bda.plone.orders.common import calculate_order_salaried
from bda.plone.orders.common import calculate_order_state
from bda.plone.orders.common import get_bookings_soup
from bda.plone.orders.common import get_order
from bda.plone.orders.common import get_orders_soup
from bda.plone.orders.common import OrderData
from bda.plone.orders.contacts import get_contacts_soup
from bda.plone.orders.interfaces import ITrading
from bda.plone.payment import Payments
from bda.plone.shipping.interfaces import IShippingItem
from decimal import Decimal
from node.ext.zodb.utils import reset_odict
from plone.uuid.interfaces import IUUID
from zope.component.hooks import getSite
import logging
import uuid
logger = logging.getLogger('bda.plone.orders UPGRADE')
def fix_bookings_vendor_uid(ctx=None):
"""Add vendor_uid attribute to booking records.
"""
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['vendor_uid']
if not isinstance(item.attrs['vendor_uid'], uuid.UUID):
update = True
except KeyError:
update = True
if not update:
continue
buyable_uid = item.attrs['buyable_uid']
obj = get_object_by_uid(portal, buyable_uid)
if not obj:
shop = acquire_vendor_or_shop_root(portal)
else:
shop = acquire_vendor_or_shop_root(obj)
vendor_uid = uuid.UUID(IUUID(shop))
item.attrs['vendor_uid'] = vendor_uid
need_rebuild = True
logging.info(
u"Added vendor_uid to booking {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_orders_vendor_uids(ctx=None):
"""Add vendor_uids attribute to order records.
"""
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['vendor_uids']
if not isinstance(item.attrs['vendor_uids'], list)\
or not item.attrs['vendor_uids']:
update = True
except KeyError:
update = True
if not update:
continue
order_data = OrderData(portal, order=item)
vendor_uids = set()
for booking in order_data.bookings:
vendor_uids.add(booking.attrs['vendor_uid'])
item.attrs['vendor_uids'] = list(vendor_uids)
need_rebuild = True
logging.info(
u"Added vendor_uids to order {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt orders catalog")
def fix_bookings_state_salaried_tid(ctx=None):
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
order_data = OrderData(portal, order=item)
try:
state = item.attrs['state']
state_exists = True
except KeyError:
state = None
state_exists = False
try:
salaried = item.attrs['salaried']
salaried_exists = True
except KeyError:
salaried = None
salaried_exists = False
try:
tid = item.attrs['tid']
tid_exists = True
except KeyError:
tid = 'none' # tid default in b.p.payment
tid_exists = False
for booking in order_data.bookings:
            # add to the booking node
try:
booking.attrs['state']
except KeyError:
booking.attrs['state'] = state
need_rebuild = True
logging.info(
u"Added state {0} to booking {1}".format(
state, item.attrs['uid']
)
)
try:
booking.attrs['salaried']
except KeyError:
booking.attrs['salaried'] = salaried
need_rebuild = True
logging.info(
u"Added salaried {0} to booking {1}".format(
salaried, item.attrs['uid']
)
)
try:
booking.attrs['tid']
except KeyError:
booking.attrs['tid'] = tid
need_rebuild = True
logging.info(
u"Added tid {0} to booking {1}".format(
tid, item.attrs['uid']
)
)
# now, delete from order node
if state_exists:
del item.attrs['state']
if salaried_exists:
del item.attrs['salaried']
if tid_exists:
del item.attrs['tid']
if need_rebuild:
bookings_soup = get_bookings_soup(portal)
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_discount_attrs(ctx=None):
portal = getSite()
# discount attrs on order
orders_soup = get_orders_soup(portal)
need_rebuild = False
data = orders_soup.storage.data
for item in data.values():
try:
item.attrs['cart_discount_net']
except KeyError:
need_rebuild = True
item.attrs['cart_discount_net'] = Decimal(0)
logging.info(
u"Added cart_discount_net to order {0}".format(
item.attrs['uid']
)
)
try:
item.attrs['cart_discount_vat']
except KeyError:
need_rebuild = True
item.attrs['cart_discount_vat'] = Decimal(0)
logging.info(
u"Added cart_discount_vat to order {0}".format(
item.attrs['uid']
)
)
if need_rebuild:
orders_soup.rebuild()
logging.info("Rebuilt orders catalog")
# discount attrs on bookings
bookings_soup = get_bookings_soup(portal)
need_rebuild = False
data = bookings_soup.storage.data
for item in data.values():
try:
item.attrs['discount_net']
except KeyError:
need_rebuild = True
item.attrs['discount_net'] = Decimal(0)
logging.info(
u"Added discount_net to booking {0}".format(item.attrs['uid'])
)
if need_rebuild:
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_shipping_attrs(ctx=None):
portal = getSite()
orders_soup = get_orders_soup(portal)
data = orders_soup.storage.data
for item in data.values():
try:
item.attrs['shipping_method']
except KeyError:
item.attrs['shipping_method'] = 'unknown'
logging.info(
u"Added shipping_method {0} to booking {1}".format(
'unknown', item.attrs['uid']
)
)
try:
item.attrs['shipping_label']
except KeyError:
item.attrs['shipping_label'] = _('unknown', default=u'Unknown')
logging.info(
u"Added shipping_label {0} to booking {1}".format(
'unknown', item.attrs['uid']
)
)
try:
item.attrs['shipping_description']
except KeyError:
item.attrs['shipping_description'] = \
_('unknown', default=u'Unknown')
logging.info(
u"Added shipping_description {0} to booking {1}".format(
'unknown', item.attrs['uid']
)
)
try:
item.attrs['shipping_net']
except KeyError:
item.attrs['shipping_net'] = item.attrs['shipping']
logging.info(
u"Added shipping_net {0} to booking {1}".format(
item.attrs['shipping'], item.attrs['uid']
)
)
try:
item.attrs['shipping_vat']
except KeyError:
item.attrs['shipping_vat'] = Decimal(0)
logging.info(
u"Added shipping_vat {0} to booking {1}".format(
Decimal(0), item.attrs['uid']
)
)
def fix_payment_attrs(ctx=None):
portal = getSite()
payments = Payments(portal)
orders_soup = get_orders_soup(portal)
data = orders_soup.storage.data
for item in data.values():
try:
item.attrs['payment_method']
item.attrs['payment_label']
continue
except KeyError:
payment_method = item.attrs['payment_selection.payment']
payment = payments.get(payment_method)
if payment:
payment_label = payment.label
else:
payment_label = _('unknown', default=u'Unknown')
item.attrs['payment_method'] = payment_method
logging.info(
u"Added payment_method {0} to booking {1}".format(
payment_method, item.attrs['uid']
)
)
item.attrs['payment_label'] = payment_label
logging.info(
u"Added payment_label {0} to booking {1}".format(
payment_label, item.attrs['uid']
)
)
def fix_bookings_shippable(ctx=None):
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for booking in data.values():
try:
booking.attrs['shippable']
except KeyError:
obj = get_object_by_uid(portal, booking.attrs['buyable_uid'])
shippable = True
if obj:
shippable = IShippingItem(obj).shippable
booking.attrs['shippable'] = shippable
need_rebuild = True
logging.info(
u"Added shippable {0} to booking {1}".format(
shippable, booking.attrs['uid']
)
)
if need_rebuild:
bookings_soup = get_bookings_soup(portal)
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_bookings_trading(ctx=None):
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for booking in data.values():
try:
booking.attrs['item_number']
except KeyError:
obj = get_object_by_uid(portal, booking.attrs['buyable_uid'])
if obj:
trading = ITrading(obj)
item_number = trading.item_number
gtin = trading.gtin
else:
item_number = ''
gtin = ''
need_rebuild = True
booking.attrs['item_number'] = item_number
logging.info(
u"Added item_number {0} to booking {1}".format(
item_number, booking.attrs['uid']
)
)
booking.attrs['gtin'] = gtin
logging.info(
u"Added gtin {0} to booking {1}".format(
gtin, booking.attrs['uid']
)
)
if need_rebuild:
bookings_soup = get_bookings_soup(portal)
bookings_soup.rebuild()
logging.info("Rebuilt bookings catalog")
def reset_records(ctx=None):
def ignore_key(key):
return key.startswith('____')
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
for order in data.values():
reset_odict(order.attrs.storage, ignore_key=ignore_key)
logging.info(
u'Reset attributes storage on order {0}'.format(
order.attrs['uid'],
)
)
soup = get_bookings_soup(portal)
data = soup.storage.data
for booking in data.values():
reset_odict(booking.attrs.storage, ignore_key=ignore_key)
logging.info(
u"Reset attributes storage on booking {0}".format(
booking.attrs['uid']
)
)
def fix_bookings_email(ctx=None):
"""Add email attribute to booking records from the corresponding order.
"""
portal = getSite()
soup = get_bookings_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['email']
except KeyError:
update = True
if not update:
continue
order = get_order(portal, item.attrs['order_uid'])
email = order.attrs.get('personal_data.email', 'n/a')
item.attrs['email'] = email
need_rebuild = True
logging.info(
u"Added email to booking {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt bookings catalog")
def fix_contacts_email(ctx=None):
"""Add email attribute to contact records.
"""
portal = getSite()
soup = get_contacts_soup(portal)
data = soup.storage.data
need_rebuild = False
for item in data.values():
update = False
try:
item.attrs['email']
except KeyError:
update = True
if not update:
continue
email = item.attrs.get('personal_data.email', 'n/a')
item.attrs['email'] = email
need_rebuild = True
logging.info(
u"Added email to contact {0}".format(item.attrs['uid'])
)
if need_rebuild:
soup.rebuild()
logging.info("Rebuilt contacts catalog")
def fix_order_state_and_salaried(ctx=None):
"""Re-add state and salaried on order, needed for sorting in orders table
"""
portal = getSite()
soup = get_orders_soup(portal)
data = soup.storage.data
for order in data.values():
order_data = OrderData(portal, uid=order.attrs['uid'])
bookings = order_data.bookings
order.attrs['state'] = calculate_order_state(bookings)
order.attrs['salaried'] = calculate_order_salaried(bookings)
soup.rebuild()
|
bsd-3-clause
| -2,279,739,217,219,872,300
| 31.286667
| 78
| 0.542157
| false
| 4.092676
| false
| false
| false
|
pydoit/doit
|
doit/runner.py
|
1
|
20369
|
"""Task runner"""
from multiprocessing import Process, Queue as MQueue
from threading import Thread
import pickle
import queue
import cloudpickle
from .exceptions import InvalidTask, CatchedException
from .exceptions import TaskFailed, SetupError, DependencyError, UnmetDependency
from .task import Stream, DelayedLoaded
# execution result.
SUCCESS = 0
FAILURE = 1
ERROR = 2
class Runner():
"""Task runner
run_all()
run_tasks():
for each task:
select_task()
execute_task()
process_task_result()
finish()
"""
def __init__(self, dep_manager, reporter, continue_=False,
always_execute=False, stream=None):
"""
@param dep_manager: DependencyBase
@param reporter: reporter object to be used
@param continue_: (bool) execute all tasks even after a task failure
@param always_execute: (bool) execute even if up-to-date or ignored
@param stream: (task.Stream) global verbosity
"""
self.dep_manager = dep_manager
self.reporter = reporter
self.continue_ = continue_
self.always_execute = always_execute
self.stream = stream if stream else Stream(0)
self.teardown_list = [] # list of tasks to be teardown
self.final_result = SUCCESS # until something fails
self._stop_running = False
def _handle_task_error(self, node, catched_excp):
"""handle all task failures/errors
called whenever there is an error before executing a task or
its execution is not successful.
"""
assert isinstance(catched_excp, CatchedException)
node.run_status = "failure"
self.dep_manager.remove_success(node.task)
self.reporter.add_failure(node.task, catched_excp)
# only return FAILURE if no errors happened.
if isinstance(catched_excp, TaskFailed) and self.final_result != ERROR:
self.final_result = FAILURE
else:
self.final_result = ERROR
if not self.continue_:
self._stop_running = True
def _get_task_args(self, task, tasks_dict):
"""get values from other tasks"""
task.init_options()
def get_value(task_id, key_name):
"""get single value or dict from task's saved values"""
if key_name is None:
return self.dep_manager.get_values(task_id)
return self.dep_manager.get_value(task_id, key_name)
# selected just need to get values from other tasks
for arg, value in task.getargs.items():
task_id, key_name = value
if tasks_dict[task_id].has_subtask:
# if a group task, pass values from all sub-tasks
arg_value = {}
base_len = len(task_id) + 1 # length of base name string
for sub_id in tasks_dict[task_id].task_dep:
name = sub_id[base_len:]
arg_value[name] = get_value(sub_id, key_name)
else:
arg_value = get_value(task_id, key_name)
task.options[arg] = arg_value
def select_task(self, node, tasks_dict):
"""Returns bool, task should be executed
* side-effect: set task.options
Tasks should be executed if they are not up-to-date.
Tasks that contains setup-tasks must be selected twice,
so it gives chance for dependency tasks to be executed after
checking it is not up-to-date.
"""
task = node.task
# if run_status is not None, it was already calculated
if node.run_status is None:
self.reporter.get_status(task)
# overwrite with effective verbosity
task.overwrite_verbosity(self.stream)
# check if task should be ignored (user controlled)
if node.ignored_deps or self.dep_manager.status_is_ignore(task):
node.run_status = 'ignore'
self.reporter.skip_ignore(task)
return False
# check task_deps
if node.bad_deps:
bad_str = " ".join(n.task.name for n in node.bad_deps)
self._handle_task_error(node, UnmetDependency(bad_str))
return False
# check if task is up-to-date
res = self.dep_manager.get_status(task, tasks_dict)
if res.status == 'error':
msg = "ERROR: Task '{}' checking dependencies: {}".format(
task.name, res.get_error_message())
self._handle_task_error(node, DependencyError(msg))
return False
# set node.run_status
if self.always_execute:
node.run_status = 'run'
else:
node.run_status = res.status
# if task is up-to-date skip it
if node.run_status == 'up-to-date':
self.reporter.skip_uptodate(task)
task.values = self.dep_manager.get_values(task.name)
return False
if task.setup_tasks:
            # don't execute now, execute setup first...
return False
else:
# sanity checks
assert node.run_status == 'run', \
"%s:%s" % (task.name, node.run_status)
assert task.setup_tasks
try:
self._get_task_args(task, tasks_dict)
except Exception as exception:
msg = ("ERROR getting value for argument\n" + str(exception))
self._handle_task_error(node, DependencyError(msg))
return False
return True
def execute_task(self, task):
"""execute task's actions"""
# register cleanup/teardown
if task.teardown:
self.teardown_list.append(task)
# finally execute it!
self.reporter.execute_task(task)
return task.execute(self.stream)
def process_task_result(self, node, catched_excp):
"""handles result"""
task = node.task
# save execution successful
if catched_excp is None:
task.save_extra_values()
try:
self.dep_manager.save_success(task)
except FileNotFoundError as exception:
msg = ("ERROR: Task '{}' saving success: " \
"Dependent file '{}' does not exist".format(
task.name, exception.filename))
catched_excp = DependencyError(msg)
else:
node.run_status = "successful"
self.reporter.add_success(task)
return
# task error
self._handle_task_error(node, catched_excp)
def run_tasks(self, task_dispatcher):
"""This will actually run/execute the tasks.
It will check file dependencies to decide if task should be executed
and save info on successful runs.
It also deals with output to stdout/stderr.
@param task_dispatcher: L{TaskDispacher}
"""
node = None
while True:
if self._stop_running:
break
try:
node = task_dispatcher.generator.send(node)
except StopIteration:
break
if not self.select_task(node, task_dispatcher.tasks):
continue
catched_excp = self.execute_task(node.task)
self.process_task_result(node, catched_excp)
def teardown(self):
"""run teardown from all tasks"""
for task in reversed(self.teardown_list):
self.reporter.teardown_task(task)
catched = task.execute_teardown(self.stream)
if catched:
msg = "ERROR: task '%s' teardown action" % task.name
error = SetupError(msg, catched)
self.reporter.cleanup_error(error)
def finish(self):
"""finish running tasks"""
# flush update dependencies
self.dep_manager.close()
self.teardown()
# report final results
self.reporter.complete_run()
return self.final_result
def run_all(self, task_dispatcher):
"""entry point to run tasks
@ivar task_dispatcher (TaskDispatcher)
"""
try:
if hasattr(self.reporter, 'initialize'):
self.reporter.initialize(task_dispatcher.tasks,
task_dispatcher.selected_tasks)
self.run_tasks(task_dispatcher)
except InvalidTask as exception:
self.reporter.runtime_error(str(exception))
self.final_result = ERROR
finally:
self.finish()
return self.final_result
# JobXXX objects send from main process to sub-process for execution
class JobHold(object):
"""Indicates there is no task ready to be executed"""
type = object()
class JobTask(object):
"""Contains a Task object"""
type = object()
def __init__(self, task):
self.name = task.name
try:
self.task_pickle = cloudpickle.dumps(task)
except pickle.PicklingError as excp:
msg = """Error on Task: `{}`.
Task created at execution time that has an attribute that can not be pickled,
so it is not feasible to use it with multi-processing. To fix this issue make sure
the task is picklable or just do not use multi-processing execution.
Original exception {}: {}
"""
raise InvalidTask(msg.format(self.name, excp.__class__, excp))
class JobTaskPickle(object):
"""dict of Task object excluding attributes that might be unpicklable"""
type = object()
def __init__(self, task):
self.task_dict = task.pickle_safe_dict() # actually a dict to be pickled
@property
def name(self):
return self.task_dict['name']
class MReporter(object):
"""send reported messages to master process
puts a dictionary {'name': <task-name>,
'reporter': <reporter-method-name>}
on runner's 'result_q'
"""
def __init__(self, runner, reporter_cls):
self.runner = runner
self.reporter_cls = reporter_cls
def __getattr__(self, method_name):
"""substitute any reporter method with a dispatching method"""
if not hasattr(self.reporter_cls, method_name):
raise AttributeError(method_name)
def rep_method(task):
self.runner.result_q.put({'name':task.name,
'reporter':method_name})
return rep_method
def complete_run(self):
"""ignore this on MReporter"""
pass
class MRunner(Runner):
"""MultiProcessing Runner """
Queue = staticmethod(MQueue)
Child = staticmethod(Process)
@staticmethod
def available():
"""check if multiprocessing module is available"""
# see: https://bitbucket.org/schettino72/doit/issue/17
# http://bugs.python.org/issue3770
        # not available on BSD systems
try:
import multiprocessing.synchronize
multiprocessing # pyflakes
except ImportError: # pragma: no cover
return False
else:
return True
def __init__(self, dep_manager, reporter,
continue_=False, always_execute=False,
stream=None, num_process=1):
Runner.__init__(self, dep_manager, reporter, continue_=continue_,
always_execute=always_execute, stream=stream)
self.num_process = num_process
self.free_proc = 0 # number of free process
self.task_dispatcher = None # TaskDispatcher retrieve tasks
self.tasks = None # dict of task instances by name
self.result_q = None
def __getstate__(self):
# multiprocessing on Windows will try to pickle self.
        # These attributes are actually not used by the spawned process, so
        # they are safe to remove.
pickle_dict = self.__dict__.copy()
pickle_dict['reporter'] = None
pickle_dict['task_dispatcher'] = None
pickle_dict['dep_manager'] = None
return pickle_dict
def get_next_job(self, completed):
"""get next task to be dispatched to sub-process
On MP needs to check if the dependencies finished its execution
@returns : - None -> no more tasks to be executed
- JobXXX
"""
if self._stop_running:
return None # gentle stop
node = completed
while True:
# get next task from controller
try:
node = self.task_dispatcher.generator.send(node)
if node == "hold on":
self.free_proc += 1
return JobHold()
# no more tasks from controller...
except StopIteration:
# ... terminate one sub process if no other task waiting
return None
# send a task to be executed
if self.select_task(node, self.tasks):
# If sub-process already contains the Task object send
# only safe pickle data, otherwise send whole object.
task = node.task
if task.loader is DelayedLoaded and self.Child == Process:
return JobTask(task)
else:
return JobTaskPickle(task)
def _run_tasks_init(self, task_dispatcher):
"""initialization for run_tasks"""
self.task_dispatcher = task_dispatcher
self.tasks = task_dispatcher.tasks
def _run_start_processes(self, job_q, result_q):
"""create and start sub-processes
@param job_q: (multiprocessing.Queue) tasks to be executed
@param result_q: (multiprocessing.Queue) collect task results
@return list of Process
"""
# #### DEBUG PICKLE ERRORS
# class MyPickler (pickle._Pickler):
# def save(self, obj):
# print('pickling object {} of type {}'.format(obj, type(obj)))
# try:
# Pickler.save(self, obj)
# except:
# print('error. skipping...')
# from io import BytesIO
# pickler = MyPickler(BytesIO())
# pickler.dump(self)
# ### END DEBUG
proc_list = []
for _ in range(self.num_process):
next_job = self.get_next_job(None)
if next_job is None:
break # do not start more processes than tasks
job_q.put(next_job)
process = self.Child(
target=self.execute_task_subprocess,
args=(job_q, result_q, self.reporter.__class__))
process.start()
proc_list.append(process)
return proc_list
def _process_result(self, node, task, result):
"""process result received from sub-process"""
catched_excp = result.get('failure')
task.update_from_pickle(result['task'])
for action, output in zip(task.actions, result['out']):
action.out = output
for action, output in zip(task.actions, result['err']):
action.err = output
self.process_task_result(node, catched_excp)
def run_tasks(self, task_dispatcher):
"""controls subprocesses task dispatching and result collection
"""
# result queue - result collected from sub-processes
result_q = self.Queue()
# task queue - tasks ready to be dispatched to sub-processes
job_q = self.Queue()
self._run_tasks_init(task_dispatcher)
proc_list = self._run_start_processes(job_q, result_q)
# wait for all processes terminate
proc_count = len(proc_list)
try:
while proc_count:
# wait until there is a result to be consumed
result = result_q.get()
if 'exit' in result:
raise result['exit'](result['exception'])
node = task_dispatcher.nodes[result['name']]
task = node.task
if 'reporter' in result:
getattr(self.reporter, result['reporter'])(task)
continue
self._process_result(node, task, result)
# update num free process
free_proc = self.free_proc + 1
self.free_proc = 0
# tries to get as many tasks as free process
completed = node
for _ in range(free_proc):
next_job = self.get_next_job(completed)
completed = None
if next_job is None:
proc_count -= 1
job_q.put(next_job)
# check for cyclic dependencies
assert len(proc_list) > self.free_proc
except (SystemExit, KeyboardInterrupt, Exception):
if self.Child == Process:
for proc in proc_list:
proc.terminate()
raise
# we are done, join all process
for proc in proc_list:
proc.join()
# get teardown results
while not result_q.empty(): # safe because subprocess joined
result = result_q.get()
assert 'reporter' in result
task = task_dispatcher.tasks[result['name']]
getattr(self.reporter, result['reporter'])(task)
def execute_task_subprocess(self, job_q, result_q, reporter_class):
"""executed on child processes
@param job_q: task queue,
* None elements indicate process can terminate
* JobHold indicate process should wait for next task
* JobTask / JobTaskPickle task to be executed
"""
self.result_q = result_q
if self.Child == Process:
self.reporter = MReporter(self, reporter_class)
try:
while True:
job = job_q.get()
if job is None:
self.teardown()
return # no more tasks to execute finish this process
                # job is an incomplete Task obj when pickled; attributes
                # that might contain unpicklable data were removed.
# so we need to get task from this process and update it
# to get dynamic task attributes.
if job.type is JobTaskPickle.type:
task = self.tasks[job.name]
if self.Child == Process: # pragma: no cover ...
                        # ... actually covered but subprocess doesn't get it.
task.update_from_pickle(job.task_dict)
elif job.type is JobTask.type:
task = pickle.loads(job.task_pickle)
# do nothing. this is used to start the subprocess even
# if no task is available when process is created.
else:
assert job.type is JobHold.type
continue # pragma: no cover
result = {'name': task.name}
task_failure = self.execute_task(task)
if task_failure:
result['failure'] = task_failure
result['task'] = task.pickle_safe_dict()
result['out'] = [action.out for action in task.actions]
result['err'] = [action.err for action in task.actions]
result_q.put(result)
except (SystemExit, KeyboardInterrupt, Exception) as exception:
# error, blow-up everything. send exception info to master process
result_q.put({
'exit': exception.__class__,
'exception': str(exception)})
class MThreadRunner(MRunner):
"""Parallel runner using threads"""
Queue = staticmethod(queue.Queue)
class DaemonThread(Thread):
"""daemon thread to make sure process is terminated if there is
        an uncaught exception and threads are not correctly joined.
"""
def __init__(self, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
self.daemon = True
Child = staticmethod(DaemonThread)
@staticmethod
def available():
return True
|
mit
| 7,347,571,069,337,955,000
| 34.797891
| 80
| 0.56316
| false
| 4.513406
| false
| false
| false
|
BozoDev/CoCerBot
|
contrib/HangoutsBot/hangoutsbot/hangupsbot/plugins/CoCerBot/__init__.py
|
1
|
5738
|
import asyncio, io, logging, os, subprocess, re, time
import plugins
logger = logging.getLogger(__name__)
_cocext = { "running": False }
def _initialise(bot):
plugins.register_user_command(["screen", "coc"])
plugins.register_admin_command(["setlog", "clearlog"])
@asyncio.coroutine
def _open_file(name):
logger.debug("opening file: {}".format(name))
return open(name, 'rb')
@asyncio.coroutine
def _screen( filename):
logger.info("screen as {}".format(filename))
loop = asyncio.get_event_loop()
# read the file into a byte array
file_resource = yield from _open_file(filename)
file_data = yield from loop.run_in_executor(None, file_resource.read)
image_data = yield from loop.run_in_executor(None, io.BytesIO, file_data)
yield from loop.run_in_executor(None, os.remove, filename)
return image_data
def setlog(bot, event, *args):
"""set log from CoCerBot-pipe for current converation
use /bot clearlog to clear it
"""
logpipe = bot.conversation_memory_get(event.conv_id, 'logpipe')
if logpipe is None:
bot.conversation_memory_set(event.conv_id, 'logpipe', ''.join(args))
html = "<i><b>{}</b> updated logpipe URL".format(event.user.full_name)
yield from bot.coro_send_message(event.conv, html)
else:
html = "<i><b>{}</b> URL already exists for this conversation!<br /><br />".format(event.user.full_name)
html += "<i>Clear it first with /bot clearlog before setting a new one."
yield from bot.coro_send_message(event.conv, html)
def clearlog(bot, event, *args):
"""clear log-pipe for current converation
"""
logpipe = bot.conversation_memory_get(event.conv_id, 'logpipe')
if logpipe is None:
html = "<i><b>{}</b> nothing to clear for this conversation".format(event.user.full_name)
yield from bot.coro_send_message(event.conv, html)
else:
bot.conversation_memory_set(event.conv_id, 'logpipe', None)
html = "<i><b>{}</b> Log cleared for this conversation!<br />".format(event.user.full_name)
yield from bot.coro_send_message(event.conv, html)
def screen(bot, event, *args):
"""get a screenshot of current CoCerBot
"""
if _cocext["running"]:
yield from bot.coro_send_message(event.conv_id, "<i>processing another request, try again shortly</i>")
return
if args:
img = args[0]
else:
img = bot.conversation_memory_get(event.conv_id, 'img')
if img is None:
img = '/tmp/CoCNow.png'
else:
_cocext["running"] = True
if not re.match(r'^/tmp/', img):
img = '/tmp/' + img
filename = event.conv_id + "." + str(time.time()) +".png"
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
logger.debug("temporary screenshot file: {}".format(filepath))
params = ['/usr/bin/convert', '-colorspace', 'gray', img, filename ]
try:
subprocess.check_call(params)
except subprocess.CalledProcessError as e:
yield from bot.coro_send_message(event.conv, "<i>Imagick convert failed</i>".format(e))
_cocext["running"] = False
return
try:
loop = asyncio.get_event_loop()
image_data = yield from _screen( filename)
except Exception as e:
yield from bot.coro_send_message(event.conv_id, "<i>error getting screenshot</i>")
logger.exception("screencap failed".format(url))
_cocext["running"] = False
return
try:
image_id = yield from bot._client.upload_image(image_data, filename=filename)
yield from bot._client.sendchatmessage(event.conv.id_, None, image_id=image_id)
except Exception as e:
yield from bot.coro_send_message(event.conv_id, "<i>error uploading screenshot</i>")
logger.exception("upload failed".format(filename))
_cocext["running"] = False
finally:
_cocext["running"] = False
def coc(bot, event, *args):
"""Various actions for the bot to perform
"""
if _cocext["running"]:
yield from bot.coro_send_message(event.conv_id, "<i>processing another request, try again shortly</i>")
return
cmd = args[0]
while True:
if cmd == "init":
            params = [os.path.expanduser('~/CoCerBot/HangoutsBot/hangoutsbot/hangupsbot/plugins/CoCerBot/init')]
try:
subprocess.check_call(params)
except subprocess.CalledProcessError as e:
yield from bot.coro_send_message(event.conv, "<i>Error running init command</i>".format(e))
break
if cmd == "grab":
            params = [os.path.expanduser('~/CoCerBot/HangoutsBot/hangoutsbot/hangupsbot/plugins/CoCerBot/grab')]
try:
subprocess.check_call(params)
except subprocess.CalledProcessError as e:
yield from bot.coro_send_message(event.conv, "<i>Error running grab command</i>".format(e))
break
if cmd == "raw":
            params = [os.path.expanduser('~/CoCerBot/HangoutsBot/hangoutsbot/hangupsbot/plugins/CoCerBot/raw'), args[1], args[2], args[3], args[4], args[5]]
try:
subprocess.check_call(params)
except subprocess.CalledProcessError as e:
yield from bot.coro_send_message(event.conv, "<i>Error running raw command</i>".format(e))
break
logger.debug("No command entered")
yield from bot.coro_send_message(event.conv_id, "<i>Currently supported actions:</i><br>")
yield from bot.coro_send_message(event.conv_id, "<b>init</b> Start up the bot - get in, collect Resis<br>")
break
|
gpl-2.0
| 775,744,570,202,703,900
| 33.566265
| 134
| 0.615371
| false
| 3.595238
| false
| false
| false
|
rohanpm/qingfanyi
|
tests/test_navigator.py
|
1
|
5716
|
# coding=utf-8
# qingfanyi - Chinese to English translation tool
# Copyright (C) 2016 Rohan McGovern <rohan@mcgovern.id.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from qingfanyi.match import Match
from qingfanyi.navigator import Navigator
SAMPLE_GEOM = (100, 160, 200, 320)
class SignalSpy(object):
def __init__(self, navigator):
self.emits = []
navigator.connect('current_match_changed', self.on_signal)
def on_signal(self, sender, *args):
self.emits.append(tuple([sender] + list(args)))
def test_empty_navigator():
navigator = Navigator(SAMPLE_GEOM)
assert navigator.current_match is None
navigator.navigate_offset(-1)
assert navigator.current_match is None
navigator.navigate_offset(1)
assert navigator.current_match is None
def test_navigate_offset_single():
navigator = Navigator(SAMPLE_GEOM)
spy = SignalSpy(navigator)
match = Match('sample', [], [(0, 0, 10, 10)])
navigator.add_matches([match])
assert navigator.current_match is None
assert spy.emits == []
navigator.navigate_offset(-1)
assert navigator.current_match is match
assert spy.emits == [
(navigator, None, match)
]
navigator.navigate_offset(1)
assert navigator.current_match is match
assert len(spy.emits) == 1
navigator.navigate_offset(57)
assert navigator.current_match is match
assert len(spy.emits) == 1
def test_add_match_retains_current():
navigator = Navigator(SAMPLE_GEOM)
spy = SignalSpy(navigator)
# These matches are in the expected sorted order (by geometry),
# but we will add them in a different order.
matches = [
Match('ab', [], [(0, 0, 20, 10)]),
Match('a', [], [(0, 0, 10, 10)]),
Match('ab', [], [(10, 0, 20, 10)]),
Match('a', [], [(10, 0, 10, 10)]),
Match('ab', [], [(40, 0, 10, 10)]),
Match('ab', [], [(0, 30, 10, 10)]),
Match('ab', [], [(0, 40, 10, 10)]),
]
first_batch = [
matches[0],
matches[3],
matches[5]
]
second_batch = [
matches[1],
matches[2],
]
third_batch = [
matches[4],
matches[6],
]
navigator.add_matches(first_batch)
assert navigator.current_match is None
assert spy.emits == []
# should navigate through in the expected order
navigator.navigate_offset(1)
assert navigator.current_match is first_batch[0]
navigator.navigate_offset(1)
assert navigator.current_match is first_batch[1]
navigator.navigate_offset(1)
assert navigator.current_match is first_batch[2]
navigator.navigate_offset(1)
assert navigator.current_match is first_batch[0]
navigator.navigate_offset(1)
assert navigator.current_match is first_batch[1]
assert spy.emits == [
(navigator, None, first_batch[0]),
(navigator, first_batch[0], first_batch[1]),
(navigator, first_batch[1], first_batch[2]),
(navigator, first_batch[2], first_batch[0]),
(navigator, first_batch[0], first_batch[1]),
]
spy.emits = []
# now add some more
navigator.add_matches(second_batch)
# That should not have emitted anything or changed the current match
assert spy.emits == []
assert navigator.current_match is first_batch[1]
navigator.navigate_offset(-1)
assert navigator.current_match is matches[2]
assert spy.emits == [
(navigator, matches[3], matches[2])
]
spy.emits = []
# Add the last batch
navigator.add_matches(third_batch)
assert spy.emits == []
assert navigator.current_match is matches[2]
# It should have sorted all of these in the expected order
assert navigator.matches == matches
def test_set_current_match_by_point():
navigator = Navigator((100, 200, 500, 700))
spy = SignalSpy(navigator)
matches = [
Match('ab', [], [(100, 200, 20, 10)]),
Match('a', [], [(100, 200, 10, 10)]),
Match('ab', [], [(140, 200, 20, 10)]),
]
navigator.add_matches(matches)
# Overlapping matches - pick the longer one (by text)
matched = navigator.set_current_match_by_point(5, 5)
assert matched is matches[0]
assert navigator.current_match is matched
assert spy.emits == [
(navigator, None, matched)
]
spy.emits = []
# Simple match
matched = navigator.set_current_match_by_point(45, 5)
assert matched is matches[2]
assert navigator.current_match is matched
assert spy.emits == [
(navigator, matches[0], matched)
]
spy.emits = []
# Click the same match again, it should return it but not emit anything
matched = navigator.set_current_match_by_point(46, 6)
assert matched is matches[2]
assert navigator.current_match is matched
assert spy.emits == []
# Click somewhere with no match, it should return None and not emit anything nor
# change the current match.
matched = navigator.set_current_match_by_point(200, -30)
assert matched is None
assert navigator.current_match is matches[2]
assert spy.emits == []
|
gpl-3.0
| -6,003,047,400,805,221,000
| 29.731183
| 84
| 0.644682
| false
| 3.493888
| false
| false
| false
|
stormi/tsunami
|
src/primaires/objet/commandes/donner/__init__.py
|
1
|
5296
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'donner'."""
from fractions import Fraction
from primaires.interpreteur.commande.commande import Commande
from primaires.objet.conteneur import SurPoids
class CmdDonner(Commande):
"""Commande 'donner'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "donner", "give")
self.nom_categorie = "objets"
self.schema = "(<nombre>) <nom_objet> " \
"a/to <cible:personnage_present|nom_pnj>"
self.aide_courte = "donne un objet"
self.aide_longue = \
"Cette commande permet de donner un ou plusieurs " \
"objets à un autre personnage présent dans la salle. " \
"La forme simple de cette commande est |cmd|donner " \
"nom de l'objet à nom du personnage|ff| (ou |cmd|give " \
"nom de l'objet to nom du personnage|ff|, si vous " \
"êtes en anglais). Vous pouvez également préciser, " \
"avant le nom de l'objet, un nombre représentant " \
"le nombre d'objets à donner au personnage cible. " \
"Exemple : |cmd|donner 2 botte à tavernier|ff| (ou " \
"|cmd|give 2 botte to tavernier|ff| en anglais)."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
"True), )"
nom_objet.proprietes["quantite"] = "True"
nom_objet.proprietes["conteneur"] = "True"
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
personnage.agir("poser")
nombre = 1
if dic_masques["nombre"]:
nombre = dic_masques["nombre"].nombre
objets = list(dic_masques["nom_objet"].objets_qtt_conteneurs)[:nombre]
if hasattr(dic_masques["cible"], "personnage"):
cible = dic_masques["cible"].personnage
else:
cible = dic_masques["cible"].pnj
donne = 0
for objet, qtt, conteneur in objets:
if not objet.peut_prendre:
personnage << "Vous ne pouvez pas prendre {} avec vos " \
"mains...".format(objet.nom_singulier)
return
if qtt > nombre:
qtt = nombre
try:
dans = cible.ramasser(objet, qtt=qtt)
except SurPoids:
personnage << "{} ne peut rien porter de plus.".format(
cible.get_nom_pour(personnage))
return
if dans is None:
break
conteneur.retirer(objet, qtt)
donne += 1
if donne == 0:
personnage << "{} ne peut pas prendre cela.".format(
cible.get_nom_pour(personnage))
return
if donne < qtt:
donne = qtt
personnage << "Vous donnez {} à {}.".format(objet.get_nom(donne),
cible.get_nom_pour(personnage))
if not hasattr(cible, "prototype"):
cible << "{} vous donne {}.".format(personnage.get_nom_pour(cible),
objet.get_nom(donne))
personnage.salle.envoyer("{{}} donne {} à {{}}.".format(
objet.get_nom(donne)), personnage, cible)
        # Fire the PNJ's 'donne' (give) script event
if hasattr(cible, "prototype"):
cible.script["donne"].executer(objet=objet,
quantite=Fraction(donne), personnage=personnage, pnj=cible)
|
bsd-3-clause
| -7,827,209,799,237,892,000
| 42.254098
| 79
| 0.617017
| false
| 3.513316
| false
| false
| false
|
peckhams/topoflow
|
topoflow/components/diversions_fraction_method.py
|
1
|
34322
|
# (2/3/13) Get "dt" from source_file or sink_file vs.
# channels comp, but what about canals ?
########################################################
#
# Copyright (c) 2010-2017, Scott D. Peckham
#
# Feb. 2017. Changes to internal variable names.
# Cleanup & testing with Test_Plane_Canal data.
# Sept 2014. Big changes so Channels component now requests
# what is needed from Diversions component.
#
# January 2013 (Revised handling of input/output names).
# October 2012 (CSDMS Standard Names with BMI)
# Jan-Feb 2010 (started from diversions_base.py)
# May 2010 (changes to unit_test())
#---------------------------------------------------------------------
# Notes: This component is written so that only a small amount
# of data is retrieved from, altered and then passed
# back to the Channels component. This is accomplished
# by using new interface functions in CSDMS_base.py,
# namely:
# get_values_in_grid_double()
# set_values_in_grid_double()
# get_values_in_grid_int()
# set_values_in_grid_int()
# Note that these also had to be added to the IRFPort
# for the TopoFlow CCA project, as defined in the file
# topoflow3.IRFPort.sidl.
#
# The old method required the Diversion component to
# retrieve the entire Q and vol grid from Channel component
# at each timestep, alter it, and then pass it back.
# (02/18/10)
#
# Part of the philosophy of this version is that only
# tiny changes to the code of the Channels component should
# be necessary. An intermediate approach (DIV_METHOD1)
# (before the above functions were added), required a new
# function "update_diversions()" to be added to the
# Channels component. However, that version seems to be
# somewhat faster. For the "test_plane_canal" test, the
# run time was 0.38 secs vs. 0.44 secs (on beach) for this
# new method. This extra cost should only be due to the
# extra interface function calls, and should therefore be
# a fixed cost that doesn't increase with grid size. This
# remains to be tested, however. To test, simply swap
# channels_base_DIV_METHOD1.py and
# diversions_fraction_method_DIV_METHOD1.py for
# channels_base.py and this file.
#
# cp.update_discharge() now calls dp.update(). (2/1/10)
#---------------------------------------------------------------------
#
# class diversions_component: (inherits from diversions_base.py)
#
# get_component_name()
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/16/12, Bolton)
# get_output_var_names() # (5/16/12, Bolton)
# get_var_name() # (5/16/12, Bolton)
# get_var_units() # (5/16/12, Bolton)
# update()
#----------------------------
# read_input_files()
# read_source_data()
# read_sink_data()
# read_canal_data()
#----------------------------
# update_sources()
# update_sinks()
# update_canals()
#
#---------------------------------------------------------------------
import numpy as np
import glob
import os
from topoflow.components import diversions_base
from topoflow.utils import cfg_files as cfg
from topoflow.utils import tf_utils
#---------------------------------------------------------------------
class diversions_component( diversions_base.diversions_component ):
#-----------------------------------------------------------------
_att_map = {
'model_name': 'Diversions_Fraction_Method',
'version': '3.1',
'author_name': 'Scott D. Peckham',
'grid_type': 'uniform', ## (or "none" ?)
'time_step_type': 'fixed',
'step_method': 'explicit', ## (or "none" ?)
#------------------------------------------------------
'comp_name': 'Diversions', # CHANGE LATER ?
'model_family': 'TopoFlow',
'cfg_template_file': 'Diversions_Fraction_Method.cfg.in',
'cfg_extension': '_diversions_fraction_method.cfg',
'cmt_var_prefix': '/DiversionsFraction/Input/Var/',
'gui_xml_file': '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Diversions_Fraction_Method.xml',
'dialog_title': 'Diversions: Fraction Method Parameters',
'time_units': 'seconds' }
_input_var_names = [
'canals_entrance_water__volume_flow_rate' ] # canals_in_Q (from Channels)
_output_var_names = [
'canals__count', # n_canals
'canals_entrance_water__volume_fraction', # canals_in_Q_fraction
'canals_entrance__x_coordinate', # canals_in_x
'canals_entrance__y_coordinate', # canals_in_y
'canals_exit_water__volume_flow_rate', # canals_out_Q
'canals_exit__x_coordinate', # canals_out_x
'canals_exit__y_coordinate', # canals_out_y
'model__time_step', # dt
#-------------------------------------------
'sinks__count', # n_sinks
'sinks_water__volume_flow_rate', # sinks_Q
'sinks__x_coordinate', # sinks_x
'sinks__y_coordinate', # sinks_y
#-------------------------------------------
'sources__count', # n_sources
'sources_water__volume_flow_rate', # sources_Q
'sources__x_coordinate', # sources_x
'sources__y_coordinate' ] # sources_y
_var_name_map = {
'model__time_step': 'dt',
#-----------------------------------------------------------
'canals__count': 'n_canals',
'canals_entrance__x_coordinate': 'canals_in_x',
'canals_entrance__y_coordinate': 'canals_in_y',
'canals_entrance_water__volume_flow_rate': 'canals_in_Q', ##############
'canals_entrance_water__volume_fraction': 'canals_in_Q_fraction',
'canals_exit__x_coordinate': 'canals_out_x',
'canals_exit__y_coordinate': 'canals_out_y',
'canals_exit_water__volume_flow_rate': 'canals_out_Q',
#-----------------------------------------------------------
'sinks__count': 'n_sinks',
'sinks__x_coordinate': 'sinks_x',
'sinks__y_coordinate': 'sinks_y',
'sinks_water__volume_flow_rate': 'sinks_Q',
#-----------------------------------------------------------
'sources__count': 'n_sources',
'sources__x_coordinate': 'sources_x',
'sources__y_coordinate': 'sources_y',
'sources_water__volume_flow_rate': 'sources_Q' }
_var_units_map = {
'model__time_step': 's',
'canals_entrance_water__volume_flow_rate': 'm3 s-1',
#------------------------------------------------------
'canals__count': '1',
'canals_entrance__x_coordinate': 'm',
'canals_entrance__y_coordinate': 'm',
'canals_entrance_water__volume_fraction': '1',
'canals_exit__x_coordinate': 'm',
'canals_exit__y_coordinate': 'm',
'canals_exit_water__volume_flow_rate': 'm3 s-1',
#------------------------------------------------------
'sinks__count': '1',
'sinks__x_coordinate': 'm',
'sinks__y_coordinate': 'm',
'sinks_water__volume_flow_rate': 'm3 s-1',
#------------------------------------------------------
'sources__count': '1',
'sources__x_coordinate': 'm',
'sources__y_coordinate': 'm',
'sources_water__volume_flow_rate': 'm3 s-1' }
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
def get_component_name(self):
return 'TopoFlow_Diversions_Fraction_Method'
# get_component_name()
#-------------------------------------------------------------------
def get_attribute(self, att_name):
try:
return self._att_map[ att_name.lower() ]
except:
print '###################################################'
print ' ERROR: Could not find attribute: ' + att_name
print '###################################################'
print ' '
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
def update(self, time_seconds=None):
if (self.comp_status == 'Disabled'):
return
#-----------------------------------------------
# Update self.vol with inputs/outputs from all
# sources, sinks and diversions
#-----------------------------------------------
self.status = 'updating' # (OpenMI 2.0 convention)
# print '#### Calling update_sources()...'
self.update_sources()
# print '#### Calling update_sinks()...'
self.update_sinks()
# print '#### Calling update_canals()...'
self.update_canals()
#------------------------
# Update internal clock
#------------------------
# print '#### Calling update_time()...'
self.update_time()
self.status = 'updated' # (OpenMI 2.0 convention)
# update()
#--------------------------------------------------------------------------
def read_source_data(self):
#------------------------------------------------------------
# Notes: Assume that source_file contains key-value pairs,
# starting with "n_sources:", "nt_max" and "dt:",
# followed by "n_sources" blocks of the form:
#
# source_ID: (source pixel ID as long integer)
# nt: (number of discharge (Q) values)
# Q: (vector of discharges in m^3/s)
#------------------------------------------------------------
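        #------------------------------------------------------------
        # Illustrative sketch only (the values below are made up and
        # not taken from the original project): a source_file laid
        # out as described in the Notes above could look like
        #
        #   n_sources: 2
        #   nt_max: 3
        #   dt: 3600.0
        #
        #   source_col: 10
        #   source_row: 42
        #   nt: 3
        #   Q: 1.5 2.0 0.0
        #   ... (one such block per source)
        #
        # The entries simply mirror what cfg.read_value() and
        # cfg.read_list_after_key() are asked to read further down.
        #------------------------------------------------------------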
if (self.comp_status == 'Disabled'): return
if not(self.use_sources):
self.sources_x = self.initialize_scalar( 0, dtype='float64')
self.sources_y = self.initialize_scalar( 0, dtype='float64')
self.sources_Q = self.initialize_scalar( 0, dtype='float64')
return
#-----------------------------
# Can source_file be found ?
#-----------------------------
FOUND = tf_utils.file_exists( self.source_file )
if not(FOUND):
self.use_sources = False
return
#-------------------------
# Open the "source_file"
#-------------------------
file_unit = open(self.source_file, 'r')
#----------------------------------------------------
# Read number of sources, max number of timesteps
# for any source and the common timestep, source_dt
#----------------------------------------------------
n_sources = cfg.read_value(file_unit, dtype='Int32')
nt_max = cfg.read_value(file_unit, dtype='Int32')
source_dt = cfg.read_value(file_unit, dtype='Float64')
        self.source_dt = source_dt
#--------------------
# Initialize arrays
#--------------------
self.source_cols = np.zeros([n_sources], dtype='Int32')
self.source_rows = np.zeros([n_sources], dtype='Int32')
self.nt_sources = np.zeros([n_sources], dtype='Int32')
self.sources_Q_all = np.zeros([n_sources, nt_max], dtype='Float64')
self.n_sources = n_sources
self.nt_max_sources = nt_max
#-----------------------------------
# Read information for each source
#-----------------------------------
for k in xrange(n_sources):
source_col = cfg.read_value(file_unit, dtype='Int32')
source_row = cfg.read_value(file_unit, dtype='Int32')
nt = cfg.read_value(file_unit, dtype='Int32')
Q_values = cfg.read_list_after_key(file_unit, dtype='Float64')
#---------------------------------------------------------------
nQ = np.size(Q_values)
print 'Diversions component: Read', nQ, 'Q_values for source.'
#---------------------------------------------------------------
self.source_cols[k] = source_col
self.source_rows[k] = source_row
self.nt_sources[k] = nt
self.sources_Q_all[k,0:nt] = Q_values
#-----------------------
# Close the input file
#-----------------------
file_unit.close()
#-------------------------------------
# Compute xy coordinates for sources
#-------------------------------------
        self.sources_x = (self.source_cols * self.dx)
        self.sources_y = (self.source_rows * self.dy)
# read_source_data()
#--------------------------------------------------------------------------
def read_sink_data(self):
#------------------------------------------------------------
# Notes: Assume that source_file contains key-value pairs,
# starting with "n_sinks:", "nt_max" and "dt:",
# followed by "n_sinks" blocks of the form:
#
# sink_ID: (sink pixel ID as long integer)
# nt: (number of discharge (Q) values)
# Q: (vector of discharges in m^3/s)
#------------------------------------------------------------
if (self.comp_status == 'Disabled'): return
if not(self.use_sinks):
self.sinks_x = self.initialize_scalar( 0, dtype='float64')
self.sinks_y = self.initialize_scalar( 0, dtype='float64')
self.sinks_Q = self.initialize_scalar( 0, dtype='float64')
return
#---------------------------
# Can sink_file be found ?
#---------------------------
FOUND = tf_utils.file_exists( self.sink_file )
if not(FOUND):
self.use_sinks = False
return
#-----------------------
# Open the "sink_file"
#-----------------------
file_unit = open(self.sink_file, 'r')
#------------------------------------------------
# Read number of sinks, max number of timesteps
# for any sink and the common timestep, dt
#------------------------------------------------
n_sinks = cfg.read_value(file_unit, dtype='Int32')
nt_max = cfg.read_value(file_unit, dtype='Int32')
sink_dt = cfg.read_value(file_unit, dtype='Float64')
self.sink_dt = sink_dt
#--------------------
# Initialize arrays
#--------------------
self.sink_cols = np.zeros([n_sinks], dtype='Int32')
self.sink_rows = np.zeros([n_sinks], dtype='Int32')
self.nt_sinks = np.zeros([n_sinks], dtype='Int32')
self.sinks_Q_all = np.zeros([n_sinks, nt_max], dtype='Float64')
self.n_sinks = n_sinks
self.nt_max_sinks = nt_max
#---------------------------------
# Read information for each sink
#---------------------------------
for k in xrange(n_sinks):
sink_col = cfg.read_value(file_unit, dtype='Int32')
sink_row = cfg.read_value(file_unit, dtype='Int32')
nt = cfg.read_value(file_unit, dtype='Int32')
Q_values = cfg.read_list_after_key(file_unit, dtype='Float64')
#---------------------------------------------------------------
            nQ = np.size(Q_values)
print 'Diversions component: Read', nQ, 'Q_values for sink.'
#---------------------------------------------------------------
self.sink_cols[k] = sink_col
self.sink_rows[k] = sink_row
self.nt_sinks[k] = nt
self.sinks_Q_all[k,0:nt] = Q_values
#-----------------------
# Close the input file
#-----------------------
file_unit.close()
#-----------------------------------
# Compute xy coordinates for sinks
#-----------------------------------
        self.sinks_x = (self.sink_cols * self.dx)
        self.sinks_y = (self.sink_rows * self.dy)
# read_sink_data()
#--------------------------------------------------------------------------
def read_canal_data(self):
#-------------------------------------------------------------------
# Notes: Assume that canal_file contains key-value pairs,
# starting with "n_canals:" and followed by "n_canals"
# blocks of the form:
# canal_in_ID: (pixel ID as long integer)
# canal_out_ID: (pixel ID as long integer)
# Q_fraction: (fraction to take from in_ID in [0,1])
# travel_time: (canal travel time, in minutes)
#
# nt_canals is computed as ceil(travel_time / cp.dt)
#-------------------------------------------------------------------
# Note: Q_canals is same at upstream and downstream ends, but the
# downstream end lags the upstream end by the travel time
# from in_ID to out_ID. As a result, the duration and Q
# vector for the downstream end are computed from those of
# the upstream end, and the travel time, td, as:
# Q_out = [0, Q_in]
# dur_out = [td, dur_in]
# dur_sum_out = [0, dur_sum_in] + td
#
# Rather than create the dur_sum_canals_out and
# canals_out_Q vectors, can construct them in Update_Canals.
#-------------------------------------------------------------------
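        #-------------------------------------------------------------------
        # Illustrative sketch only (values are made up): a canal_file laid
        # out as described above could contain
        #
        #   n_canals: 1
        #
        #   canal_in_col: 12
        #   canal_in_row: 30
        #   canal_out_col: 47
        #   canal_out_row: 31
        #   Q_fraction: 0.25
        #   travel_time: 120.0
        #
        # nt_canals is then computed below as ceil(travel_time / dt), i.e.
        # the number of timesteps used to delay flow between the canal
        # entrance and exit.
        #-------------------------------------------------------------------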
if (self.comp_status == 'Disabled'): return
if not(self.use_canals):
self.canals_in_x = self.initialize_scalar( 0, dtype='float64')
self.canals_in_y = self.initialize_scalar( 0, dtype='float64')
self.canals_in_Q_fraction = self.initialize_scalar( 0, dtype='float64')
self.canals_out_Q = self.initialize_scalar( 0, dtype='float64')
self.canals_out_x = self.initialize_scalar( 0, dtype='float64')
self.canals_out_y = self.initialize_scalar( 0, dtype='float64')
return
#---------------------------
# Can canal_file be found ?
#---------------------------
FOUND = tf_utils.file_exists( self.canal_file )
if not(FOUND):
self.use_canals = False
return
#------------------------
# Open the "canal_file"
#------------------------
file_unit = open(self.canal_file, 'r')
#------------------------
# Read number of canals
#------------------------
n_canals = cfg.read_value(file_unit, dtype='Int32')
self.n_canals = n_canals
#--------------------
# Initialize arrays
#--------------------
self.canals_in_col = np.zeros([n_canals], dtype='Int32')
self.canals_in_row = np.zeros([n_canals], dtype='Int32')
self.canals_out_col = np.zeros([n_canals], dtype='Int32')
self.canals_out_row = np.zeros([n_canals], dtype='Int32')
self.canals_in_Q_fraction = np.zeros([n_canals], dtype='Float64')
self.canal_times = np.zeros([n_canals], dtype='Float64')
#----------------------------------
# Read information for each canal
#----------------------------------
for k in xrange(n_canals):
canal_in_col = cfg.read_value(file_unit, dtype='Int32')
canal_in_row = cfg.read_value(file_unit, dtype='Int32')
canal_out_col = cfg.read_value(file_unit, dtype='Int32')
canal_out_row = cfg.read_value(file_unit, dtype='Int32')
Q_fraction = cfg.read_value(file_unit, dtype='Float64')
travel_time = cfg.read_value(file_unit, dtype='Float64')
#----------------------------------------------------------
self.canals_in_col[k] = canal_in_col
self.canals_in_row[k] = canal_in_row
self.canals_out_col[k] = canal_out_col
self.canals_out_row[k] = canal_out_row
self.canals_in_Q_fraction[k] = Q_fraction
self.canal_times[k] = travel_time
#----------------------------------------------------------
# print '### canal_in_col = ' + str(canal_in_col)
# print '### canal_in_row = ' + str(canal_in_row)
# print '### canal_out_col = ' + str(canal_out_col)
# print '### canal_out_row = ' + str(canal_out_row)
# print '### Q_fraction = ' + str(Q_fraction)
# print '### travel_time = ' + str(travel_time)
#--------------------------------------------------------
# Compute "nt_canals", which is the number of timesteps
# it takes for flow to travel from end to end.
#--------------------------------------------------------
# This depends on "self.dt", which is now read from the
# Diversion component CFG file. ## (9/22/14)
#--------------------------------------------------------
self.nt_canals = np.ceil(self.canal_times / self.dt)
#-----------------------
# Close the input file
#-----------------------
file_unit.close()
#-----------------------------------------------------
# Compute xy coordinates for canal entrance and exit
#-----------------------------------------------------
self.canals_in_x = (self.canals_in_col * self.dx)
self.canals_in_y = (self.canals_in_row * self.dy)
self.canals_out_x = (self.canals_out_col * self.dx)
self.canals_out_y = (self.canals_out_row * self.dy)
#-----------------------------------------------------
# Create a 2D array to store the discharge values as
# they are moving toward downstream end of canal.
#-----------------------------------------------------
# update_canals() will "roll" this array downstream
# by one array element each time step
#-----------------------------------------------------
nt_max = np.int(self.nt_canals.max())
nt_min = np.int(self.nt_canals.min())
self.canal_Q = np.zeros([n_canals, nt_max], dtype='Float64') ###################
self.nt_max = nt_max
print 'Diversions component: Min steps per canal =', nt_min
print 'Diversions component: Max steps per canal =', nt_max
#--------------------------------------------------
# Note that canals_in_Q comes from Channels comp.
#--------------------------------------------------
self.canals_out_Q = np.zeros([n_canals], dtype='Float64')
# read_canal_data()
#--------------------------------------------------------------------------
def update_sources(self):
#---------------------------------------------------------
# Notes: This function avoids loops in favor of array
# operations to increase speed.
#---------------------------------------------------------
# The number of Q-values for each source ID are
# stored as "self.nt_sources". However, for any
# given source ID, the Q-values beyond that index
# are set to zero.
#---------------------------------------------------------
if not(self.use_sources): return
#-------------------------------------------
# Update discharge, Q, for every source ID
#-------------------------------------------
if (self.time_index < self.nt_max_sources):
self.sources_Q[:] = self.sources_Q_all[ :, self.time_index ]
else:
self.sources_Q[:] = np.zeros(self.n_sources)
#------------------------------------------------------------
# Update flow volumes, vol, in CHANNELS component (2/17/10)
#------------------------------------------------------------
#--------------
# For testing
#--------------
## print 'Finished with update_sources() in Diversions.'
## print 'sources_Q ='
## print sources_Q
# update_sources()
#--------------------------------------------------------------------------
def update_sinks(self):
#-------------------------------------------------------
# Notes: This function avoids loops in favor of array
# operations to increase speed.
#-------------------------------------------------------
# The number of Q-values for each sink ID are
# stored as "self.nt_sinks". However, for any
# given sink ID, the Q-values beyond that index
# are set to zero.
#-------------------------------------------------------
# NB! This changes Q grid, so must be called before
# cp.update_flow_volume() uses the Q grid.
#-------------------------------------------------------
if not(self.use_sinks): return
#-----------------------------------------
# Update discharge, Q, for every sink ID
#-----------------------------------------
# Make sure sink cannot produce negative
# discharge values.
#-----------------------------------------
        if (self.time_index < self.nt_max_sinks):
            self.sinks_Q[:] = self.sinks_Q_all[ :, self.time_index ]
        else:
            self.sinks_Q[:] = np.zeros(self.n_sinks)
#--------------------------------------------------------
# Update discharges, Q, in CHANNELS component (2/17/10)
#--------------------------------------------------------
#------------------------------------------------------------
# Update flow volumes, vol, in CHANNELS component (2/17/10)
# NB! We MUST update "vol" also and not just "Q".
#------------------------------------------------------------
# update_sinks()
#--------------------------------------------------------------------------
def update_canals(self):
#----------------------------------------------------------
# Notes: Before 2/1/10, TopoFlow would update the channel
# component before the diversion component. Now
# cp.update_discharge() calls dp.update() itself.
#
# (2/16/10) Tested for single canal and seems to
# work as intended.
#
# NB! Flow volumes are incremented (by the function
# update_flow_volume(), but discharges are
# always recomputed from d, v and channel geom.
# So changes to cp.Q by calling dp.update()
# after cp.update() would be lost.
#
# cp.update() computes variables in this order:
# update_R()
# update_discharge() (using d and v)
# update_flow_volume() (using Q and R)
# update_flow_depth() (using vol)
# update_velocity()
#----------------------------------------------------------
# Notes: This function avoids loops in favor of array
# operations to increase speed.
#----------------------------------------------------------
# Notes: In this version, the Channels component uses
# canals_in_Q_fraction, canals_in_ID and its own
# Q grid to compute canals_in_Q. It then sets
# this into the Diversion component.
#----------------------------------------------------------
# print '#### Starting update_canals()...'
if not(self.use_canals): return
#---------------------------------------------------
# Update discharges, Q, at upstream ends of canals
# in CHANNELS component (2/17/10)
#---------------------------------------------------
#-------------------------------------------------------
# Update flow volumes, vol, at upstream ends of canals
# in CHANNELS component (2/17/10)
# NB! We MUST update "vol" also and not just "Q".
#-------------------------------------------------------
#---------------------------------------------
# Add specified discharge (Q_in) to upstream
# end of each canal (at array index 0)
#-----------------------------------------------------
# Diversions component now gets canals_in_Q from the
# Channels component as a requested input_var.
#-------------------------------------------------------
# Note that canal_Q is defined as:
# canal_Q = zeros([n_canals, nt_max], dtype='Float64')
#-------------------------------------------------------
# print '#### update_canals(), canal_Q block...'
self.canal_Q[:, 0] = self.canals_in_Q # (from Channels)
#------------------------------------------------
# Get "Q" at downstream end of each canal.
# It will be zero until flow has had time to
# travel the distance.
#------------------------------------------------
# NB! Each canal can have a different travel
# time and therefore its own "nt_canal" value.
#------------------------------------------------
# NB! canals_out_Q will be retrieved by the
# Channels component.
#-------------------------------------------------------
# Note that canal_Q is defined as:
# canal_Q = zeros([n_canals, nt_max], dtype='Float64')
#-------------------------------------------------------
# print '#### update_canals(), for loop...'
nc = self.n_canals
#-------------------------------------------------
# Zero out to be safe, since each canal can have
# a different nt_k.
#-------------------------------------------------
# self.canals_out_Q[:] = np.empty(nc, dtype='Float32')
self.canals_out_Q[:] = np.zeros(nc, dtype='Float32')
for k in xrange(nc):
nt_k = self.nt_canals[k]
self.canals_out_Q[k] = self.canal_Q[k, nt_k - 1]
## self.canal_Q[:, nt_k:] = 0.0 # (not necessary)
#---------------------------------------------------------
# Update flow volumes, vol, at downstream ends of canals
# in CHANNELS component (2/17/10)
# NB! We MUST update "vol" also and not just "Q".
#---------------------------------------------------------
#--------------
# For testing
#--------------
## print 'self.canal_Q ='
## print self.canal_Q
## print 'self.canals_out_Q ='
## print self.canals_out_Q
## print ' '
#----------------------------------------------------
# "Roll" the canal_Q array downstream (along its
# 2nd index, with index = 1) by one array element
# in each time step. "canal_Q" starts with zeros;
# i.e. canal_Q = zeros([n_canals, n_steps])
#----------------------------------------------------
# In next call to update_canals(), we'll replace
# the first Q-value in each canal.
#----------------------------------------------------
# print '#### update_canals(), roll...'
self.canal_Q = np.roll( self.canal_Q, 1, axis=1 )
# print '#### Exiting update_canals()...'
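        #----------------------------------------------------------
        # Illustrative sketch (assumed values, one canal, nt_k = 3):
        #   after setting index 0:  [q_t,   q_t-1, q_t-2]  -> out_Q = q_t-2
        #   after np.roll(.., 1):   [q_t-2, q_t,   q_t-1]
        #   next call overwrites index 0 with q_t+1, so the value
        #   read at index (nt_k - 1) always entered the canal
        #   (nt_k - 1) steps earlier.
        #----------------------------------------------------------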
# update_canals()
#--------------------------------------------------------------------------
|
mit
| 4,706,929,756,352,890,000
| 45.318489
| 110
| 0.403619
| false
| 4.401949
| false
| false
| false
|
psyfako/psyfako_member_management
|
timetable/migrations/0001_initial.py
|
1
|
1210
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-22 22:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Slot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slot_name', models.CharField(max_length=200)),
('start_time', models.DateTimeField(verbose_name='Start Time')),
('end_time', models.DateTimeField(verbose_name='End Time')),
],
),
migrations.CreateModel(
name='Workgroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('moderation', models.CharField(max_length=200)),
('protocol', models.CharField(max_length=200)),
('slot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='timetable.Slot')),
],
),
]
|
gpl-3.0
| 4,596,746,112,430,652,400
| 33.571429
| 114
| 0.575207
| false
| 4.260563
| false
| false
| false
|
plantbiogeography/BaitsFinder
|
combined-ini.py
|
1
|
34661
|
#-*- encoding: utf-8 -*-
import commands
import platform
import sys
import os,os.path
import ctypes
import csv
import glob
import math
import numpy as np
import scipy as sp
import shutil
import traceback
#import globalvar
from functools import partial
from multiprocessing import cpu_count
CPU_n1=cpu_count()
CPU_n2=str(max(1,CPU_n1))
plat=platform.platform()
path=os.path.abspath(os.curdir)
config=path.replace('\\','/')+'/config.ini'
CRC0=os.path.exists(config)
if 'Windows' not in plat and 'Linux' not in plat:
    # Warning notice if this script is not running on Linux or Windows!
print('\033[1;31;40m')
print('*' * 49)
print('***Please USE this script in Linux or windows!!!***')
print('*' * 49)
print('\033[0m')
exit(0)
elif "Linux" in plat:
Sys_ver="L"
elif 'Windows' in plat:
Sys_ver='W'
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE= -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0
FOREGROUND_BLUE = 0x01
FOREGROUND_GREEN= 0x02
FOREGROUND_RED = 0x04
FOREGROUND_INTENSITY = 0x08
BACKGROUND_BLUE = 0x10
BACKGROUND_GREEN= 0x20
BACKGROUND_RED = 0x40
BACKGROUND_INTENSITY = 0x80
class Color:
''' See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp
for information on Windows APIs. '''
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
def set_cmd_color(self, color, handle=std_out_handle):
"""(color) -> bit
Example: set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
"""
bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
return bool
def reset_color(self):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
def print_red_text(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_green_text(self, print_text):
self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_blue_text(self, print_text):
self.set_cmd_color(FOREGROUND_BLUE | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_red_text_with_blue_bg(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY| BACKGROUND_BLUE | BACKGROUND_INTENSITY)
print print_text
self.reset_color()
if os.path.exists(config)==0:
if Sys_ver=='L':
print('\033[1;31;40m')
print('*' * 73)
print('***Please make sure you have the config.ini file in current folder!!!***')
print('*' * 73)
print('\033[0m')
else:
if __name__ == "__main__":
clr = Color()
clr.print_red_text('*'*73)
clr.print_red_text('***Please make sure you have the config.ini file in current folder!!!***')
clr.print_red_text('*'*73)
exit(0)
con_file=open(config).readlines()
CRC0=0
CRC1=0
CRC2=0
rFAAnames = []
rGFFnames=[]
sFNAnames=[]
host1={}
host2={}
sufficient=[]
sorted_length=0
sorted_host_by_identity=0
baits_length=0
for line in con_file:
if '##' not in line:
if 'RAW_path' in line:
CRC0+=1
RAW_path=line.replace('\\','/').split('=')[1].strip()+'/'
RAW_path=RAW_path.replace('//','/')
elif 'Tem_path' in line:
CRC0+=1
Tem_path=line.replace('\\','/').split('=')[1].strip()+'/'
Tem_path=Tem_path.replace('//','/')
elif 'Out_path' in line:
CRC0+=1
Out_path=line.replace('\\','/').split('=')[1].strip()+'/'
Out_path=Out_path.replace('//','/')
elif 'Mafft_path' in line:
CRC0+=1
Mafft_path=line.replace('\\','/').split('=')[1].strip()+'/'
Mafft_path=Mafft_path.replace('//','/')
elif 'Blast_path' in line:
CRC0+=1
Blast_path=line.split('=')[1].strip()+'/'
Blast_path=Blast_path.replace('//','/')
elif 'Blast_ver' in line:
CRC0+=1
Blast_ver=line.split('=')[1].strip()
elif 'Blast_gap' in line:
CRC0+=1
Blast_gap=line.split('=')[1].strip()
elif 'rFAAnames' in line:
CRC0+=1
rFAAnames.append(line.split('=')[1].strip())
elif 'rFNAname' in line:
CRC0+=1
rFNAname=line.split('=')[1].strip()
elif 'rGFFnames' in line:
CRC0+=1
rGFFnames.append(line.split('=')[1].strip())
elif 'sFNAnames' in line:
CRC0+=1
CRC1+=1
sFNAnames.append(line.split('=')[1].strip())
host1[line.split('=')[1].strip()]='0'
host2[line.split('=')[1].strip()]=''
elif '=host_name=' in line:
CRC0+=1
CRC2+=1
a0=line.split('=')[0].strip()
a1=line.split('=')[2].strip()
host2[a0]=a1
if a1 !='NAN':
host1[a0]='1'
elif 'sorted_length' in line:
a0=line.split('=')[1].strip()
sorted_length=eval(a0)
elif 'sorted_host_by_identity' in line:
a0=line.split('=')[1].strip()
sorted_host_by_identity=float(a0)
elif 'sufficient_data' in line:
a0=line.split('=')[1].strip()
if a0!='NAN':
sufficient.append(a0)
elif 'baits_length' in line:
a0=line.split('=')[1].strip()
baits_length=eval(a0)
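# Illustrative sketch only -- a minimal config.ini consistent with the parsing
# loop above might look like this (all paths and file names below are
# hypothetical placeholders, not part of the original project):
#
#   RAW_path=/data/raw
#   Tem_path=/data/tmp
#   Out_path=/data/out
#   Mafft_path=/usr/local/bin
#   Blast_path=/usr/local/bin
#   Blast_ver=2.2.31
#   Blast_gap=1
#   rFNAname=reference.fna
#   rFAAnames=reference.faa
#   rGFFnames=reference.gff
#   sFNAnames=species1.fna
#   species1.fna=host_name=NAN
#   sorted_length=30
#   sorted_host_by_identity=0.95
#   sufficient_data=NAN
#   baits_length=120
#
# Lines containing '##' are skipped by the loop above, and every sFNAnames
# entry needs a matching '=host_name=' line (the CRC1 == CRC2 check below).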
if CRC0==0 or CRC2!=CRC1 :
if Sys_ver=='L':
print('\033[1;31;40m')
print('*' * 73)
print('***Please make sure you have the config.ini file in current folder!!!***')
print('*' * 73)
print('\033[0m')
else:
if __name__ == "__main__":
clr = Color()
clr.print_red_text('*'*73)
clr.print_red_text('***Please make sure you have the config.ini file in current folder!!!***')
clr.print_red_text('*'*73)
exit(0)
if os.path.exists(Tem_path)==0:
os.makedirs(Tem_path)
if os.path.exists(Out_path)==0:
os.makedirs(Out_path)
files=os.listdir(RAW_path)
shutil.rmtree(Tem_path,True)
if os.path.exists(Tem_path)==0:
os.makedirs(Tem_path)
for fname in files:
shutil.copyfile(RAW_path+fname,Tem_path+fname)
## 2.3. Combine the all_hits files of each query taxon into a single file
i0=0
tcl_files=os.listdir(RAW_path)
Tem_path2=Tem_path+'cleaned/'
Tem_path3=Tem_path+'combined/'
TemL=[]
if os.path.exists(Tem_path2)==0:
os.makedirs(Tem_path2)
if os.path.exists(Tem_path3)==0:
os.makedirs(Tem_path3)
for fname in tcl_files:
if 'out.all_hits' in fname:
i0=i0+1
lines=open(Tem_path+fname).readlines()
myfile2=open(Tem_path2+fname,'w')
for line in lines:
a1=line.split()[1]
if 'no_hits_found' in a1:
continue
else:
myfile2.write(line)
TemL.append(line)
myfile2.close()
if Blast_gap=='1':
all_hits=str(i0)+'_all_hits_nogap'
elif Blast_gap=='2':
all_hits=str(i0)+'_all_hits_gap'
myfile=open(Tem_path3+all_hits,'w')
for line in TemL:
myfile.write(line)
myfile.close()
## 2.4. Remove putative contamination.
## 2.4.1. Construct a joint list of putative contamaninant sequences
## 2.4.2. Merge contaminant sequences into a single file
## 2.4.3. Remove contaminant sequences
CRC3=0
for i1 in host1.values():
i2=eval(i1)
CRC3=CRC3+i2
if CRC3!=0:
if os.path.exists(Tem_path+'all_host'):
os.remove(Tem_path+'all_host')
all_host=open(Tem_path+'all_host','a')
for fname in sFNAnames:
pver=host1[fname]
if pver=='1':
gene=[]
hname=host2[fname]
fname2=fname.split('.')[0]+'.sort_'+hname[:2]+'.out'
lines=open(Tem_path+fname2).readlines()
myfile=open(Tem_path+fname2+'.sort',"a")
for line in lines:
line=line.split("\t")
if(float(line[2])>=sorted_host_by_identity):
gene.append(line[0])
gene=list(set(gene))
for i in gene:
myfile.write(i+"\n")
all_host.write(i+"\n")
myfile.close()
all_host.close()
files=os.listdir(Tem_path)
lines=open(Tem_path+'all_host').readlines()
host_gid=[]
for line in lines:
a0=line.split()[0]
host_gid.append(a0)
for fname in files:
if 'ungap.out.matrix.identity' in fname:
output_file21=str.lower(fname[:2])+'_ungap_host_free'
lines=open(Tem_path+fname).readlines()
myfile=open(Tem_path+output_file21,'w')
for line in lines:
a1=line.split()[0]
if a1 not in host_gid:
myfile.write(line)
myfile.close()
elif 'gap.out.matrix.identity' in fname:
output_file21=str.lower(fname[:2])+'_gap_host_free'
lines=open(Tem_path+fname).readlines()
myfile=open(Tem_path+output_file21,'w')
for line in lines:
a1=line.split()[0]
if a1 not in host_gid:
myfile.write(line)
myfile.close()
print "Host-removed has been finished sucessfully!!"
else:
for fname in files:
if 'ungap.out.matrix.identity' in fname:
output_file21=str.lower(fname[:2])+'_ungap_host_free'
lines=open(Tem_path+fname).readlines()
myfile=open(Tem_path+output_file21,'w')
for line in lines:
myfile.write(line)
myfile.close()
elif 'gap.out.matrix.identity' in fname:
output_file21=str.lower(fname[:2])+'_gap_host_free'
lines=open(Tem_path+fname).readlines()
myfile=open(Tem_path+output_file21,'w')
for line in lines:
myfile.write(line)
myfile.close()
## 2.5. Identify and remove putative paralogues.
## 2.5.1. Add position information to matrix files
files=os.listdir(Tem_path)
lines2=open(Tem_path+'combined/'+all_hits).readlines()
for fname in files:
if ('_ungap_host_free' in fname or '_gap_host_free' in fname) and '.add' not in fname:
d=[]
output_file22=fname+'.add'
lines1=open(Tem_path+fname).readlines()
myfile=open(Tem_path+output_file22,'w')
for line in lines1:
a0=line.split()[0]+'\t'+line.split()[1]+'\t'+line.split()[2]+'\t'+line.split()[3]+'\t'+line.split()[4]
d.append(a0)
for line in lines2:
a1=line.split()[0]+'\t'+line.split()[1]+'\t'+str(float(line.split()[3])/100)+'\t'+line.split()[2]+'\t'+line.split()[5]
a2=line.split()[0]+'\t'+line.split()[10]+'\t'+line.split()[11]+'\t'+line.split()[1]+'\t'+str(3*eval(line.split()[12])-2)+'\t'+str(3*eval(line.split()[13]))+'\t'+line.split()[-1]+'\t'+str(float(line.split()[3])/100)+'\n'
if a1 in d:
myfile.write(a2)
myfile.close()
## 2.5.2. Identify overlapping sequences from focal species that blast against the same reference gene
lines=open(Tem_path+output_file22).readlines()
output_file23=output_file22+'.sorted'
output_file24=output_file23+'.paralogInfo'
output_file25=output_file24+'.out'
myfile=open(Tem_path+output_file23,'w')
d=[]
f={}
x=0
for line in lines:
d.append(line.split())
y=str(x)
f[y]=''
x=x+1
d2=sorted(d, key=lambda result: (result[3],eval(result[4])),reverse=False)
for i in d2:
i1=str(i)
j=''
for k in i1.split():
j=j+k+'\t'
j=j[:-1]+'\n'
j=j.replace('[','')
j=j.replace(']','')
j=j.replace('\'','')
j=j.replace(',','')
myfile.write(j)
myfile.close()
lines=open(Tem_path+output_file23).readlines()
myfile=open(Tem_path+output_file24,'w')
ls=len(lines)
for i in range(1,ls):
l1=lines[i-1]
l2=lines[i]
z1=l1.split()[3]
z2=l2.split()[3]
z3=l1.split()[5]
z4=l2.split()[4]
if z1==z2 and (eval(z3)-eval(z4))>sorted_length:
f[str(i-1)]='PARALOG!'
f[str(i)]='PARALOG!'
for i in range(ls):
l1=lines[i]
if f[str(i)]=='PARALOG!':
l2=l1[:-1]+'\tPARALOG!\n'
else:
l2=l1
myfile.write(l2)
myfile.close()
## 2.5.3. Remove putative paralogues using a custom python script
lines=open(Tem_path+output_file24).readlines()
myfile=open(Tem_path+output_file25,'w')
over_bp=str(sorted_length) ##length of overlap
f={}
d=[]
for line in lines:
if len(line.split())>3:
a1=line.split()[3]
if 'PARALOG' in line:
d.append(a1)
for line in lines:
if len(line.split())>3:
a1=line.split()[3]
if a1 not in d:
myfile.write(line)
myfile.close()
## 2.5.4. Combine the paralogue-free data
all_host_free=[]
files=os.listdir(Tem_path)
i2=0
for fname in files:
if '.sorted.paralogInfo.out' in fname:
i2=i2+1
lines=open(Tem_path+fname).readlines()
for line in lines:
all_host_free.append(line)
if Blast_gap=='1':
fname3=str(i2)+'_nogap_hostfree_'+over_bp+'sorted'
elif Blast_gap=='2':
fname3=str(i2)+'_gap_hostfree_'+over_bp+'sorted'
myfile=open(Tem_path+fname3,'w')
for line in all_host_free:
myfile.write(line)
myfile.close()
print "Part.2 has been finished successfully!!"
## 3.1. Extract sequences of the non-paralogous genes from each of the focal species
lines=open(Tem_path+fname3).readlines()
for fname in sFNAnames :
iD=fname.split('.')[0]
if Blast_gap=='1':
output_file31=fname.split('.')[0]+'.nogap_edit.codons.query_all.seq'
elif Blast_gap=='2':
output_file31=fname.split('.')[0]+'.gap_edit.codons.query_all.seq'
lines1=open(Tem_path+fname).readlines()
myfile=open(Tem_path+output_file31,'w')
x=0
f={}
y=''
for line in lines1:
if '>' in line:
if x==1:
f[a0]=y
y=''
a0=line.split()[0][1:]
x=1
else:
y=y+line.split()[0]
f[a0]=y
for line in lines:
if iD in line:
a0=line.split()[0]
a1=line.split()[1]
a2=line.split()[2]
a3=line.split()[3]
a4=line.split()[4]
a5=line.split()[5]
a6=line.split()[6]
a7=line.split()[7]
a8=eval(a1)
a9=eval(a2)
b=f[a0]
if eval(a1)>eval(a2):
b1=b[a9-1:a8]
c0=' mod: 0.0 reverse '
else:
c0=' mod: t0.0 forward '
b1=b[a8-1:a9]
e1='>'+a0+' length: '+str(abs(eval(a1)-eval(a2)+1))+c0+a1+'-'+a2+' ['+a3+' '+a4+'-'+a5+']\n'
e2=b1+'\n'
myfile.write(e1)
myfile.write(e2)
myfile.close()
## 3.2. Combine, per gene, sequences of the reference species and of all focal species into a single file
if os.path.exists(Tem_path+'group1')==0:
os.mkdir(Tem_path+'group1')
Tem_path3=Tem_path+'/group1/'
lines1=open(Tem_path+fname3).readlines()
output_file32=fname3+'.map'
myfile=open(Tem_path+output_file32,'w')
d=[]
f={}
f1={}
f2={}
f3={}
f4={}
for line in lines1:
a0=line.split()[0]
a1=line.split()[1]
a2=line.split()[2]
a3=line.split()[3]
a4=eval(line.split()[4])
a5=eval(line.split()[5])
a6=a0+'*'+a1+'-'+a2
if a3 not in d:
d.append(a3)
f3[a3]=a4
f4[a3]=a5
f[a3]=a6
else:
f3[a3]=min(f3[a3],a4)
f4[a3]=max(f4[a3],a5)
f[a3]=f[a3]+'\t'+a6
for i in d:
myfile.write(i+'\t||\t'+f[i]+'\n')
myfile.close()
lines2=open(RAW_path+rFNAname).readlines()
g={}
b0=''
b1=''
b2=0
for line in lines2:
if '>' in line:
if b2 !=0:
g[b0]=b1+'\n'
b1=''
b0=line.split()[0][1:]
b2=1
else:
b1=b1+line.split()[0]
g[b0]=b1+'\n'
files=os.listdir(Tem_path)
x={}
x1={}
for fname in files:
if '.nogap_edit.codons.query_all.seq' in fname:
lines=open(Tem_path+fname).readlines()
ls=len(lines)/2
for i in range(ls):
l1=lines[2*i]
l2=lines[2*i+1]
y0=l1.split()[0][1:]+'*'+l1.split()[6]
y1=l1.split()[0][1:]+'*'+l1.split()[6]+'*'+l1.split()[-4]+'*'+l1.split()[-2]+'*'+l1.split()[-1]
x[y0]=l2
x1[y0]=y1
if '.gap_edit.codons.query_all.seq' in fname:
lines=open(Tem_path+fname).readlines()
ls=len(lines)/2
for i in range(ls):
l1=lines[2*i]
l2=lines[2*i+1]
y0=l1.split()[0][1:]+'*'+l1.split()[6]
y1=l1.split()[0][1:]+'*'+l1.split()[6]+'*'+l1.split()[-4]+'*'+l1.split()[-2]+'*'+l1.split()[-1]
x[y0]=l2
x1[y0]=y1
for i in d:
myfile=open(Tem_path3+i+'.fa','w')
myfile.write('>'+i+'\t'+str(f3[i])+'-'+str(f4[i])+'\n')
myfile.write(g[i][(f3[i]-1):f4[i]]+'\n')
j=f[i]
for k in j.split():
myfile.write('>'+x1[k].replace('*','\t')+'\n')
myfile.write(x[k])
myfile.close()
## 3.3 Bring sequences to the same orientation relative to the reference species
if os.path.exists(Tem_path+'group2')==0:
os.mkdir(Tem_path+'group2')
rootdir1=Tem_path+'/group1/'
rootdir2=Tem_path+'/group2/'
files=os.listdir(Tem_path+'/group1/')
f={}
f['A']='T'
f['T']='A'
f['U']='A'
f['C']='G'
f['G']='C'
f['N']='N'
for file1 in files:
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
ls=len(lines)/2
for i in range(ls):
l1=lines[2*i]
l2=lines[2*i+1]
if 'reverse' not in l1:
myfile.write(l1)
myfile.write(l2)
else:
myfile.write(l1)
l3=''
for j in l2[:-1][::-1]:
l3=l3+f[j]
l3=l3+'\n'
myfile.write(l3)
myfile.close()
## 3.4. Retain only genes with data from the best-quality focal species
if len(sufficient)>0:
if os.path.exists(Tem_path+'group3')==0:
os.mkdir(Tem_path+'group3')
rootdir1=Tem_path+'/group2/'
rootdir2=Tem_path+'/group3/'
files=os.listdir(Tem_path+'/group2/')
for file1 in files:
x=0
lines=open(rootdir1+file1).readlines()
for line in lines:
for i in sufficient:
j=i.split('.')[0]
if j in line:
x=1
if x==1:
myfile=open(rootdir2+file1,'w')
for line in lines:
myfile.write(line)
myfile.close()
else:
if os.path.exists(Tem_path+'group3')==0:
os.mkdir(Tem_path+'group3')
rootdir1=Tem_path+'/group2/'
rootdir2=Tem_path+'/group3/'
files=os.listdir(rootdir1)
for fname in files:
shutil.copyfile(rootdir1+fname,rootdir2+fname)
print "Part.3 has been finished successfully!!"
if Blast_gap=='1':
## 4.u1. Align sequences of the focal species and the reference species using information on start and end points of the BLAST alignment
if os.path.exists(Tem_path+'group5')==0:
os.mkdir(Tem_path+'group5')
rootdir1=Tem_path+'/group3/'
rootdir2=Tem_path+'/group5/'
files=os.listdir(Tem_path+'/group3/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
a0=lines[0].split()[-1]
a1=eval(a0.split('-')[0])
a2=eval(a0.split('-')[1])
l3='*'.join(lines[0].split())+'*length='+str(a2-a1+1)+'\n'
myfile.write(l3)
myfile.write(lines[1])
ls=len(lines)/2
for i in range(1,ls):
l1=lines[2*i]
l2=lines[2*i+1][:-1]
b0=l1.split()[-1][:-1]
b1=eval(b0.split('-')[0])
b2=eval(b0.split('-')[1])
c1=min(b1,b2)
c2=max(b1,b2)
l1='*'.join(l1.split())+'*length='+str(c2-c1+1)+'\n'
myfile.write(l1)
for i in range(a1,c1):
l2='-'+l2
for i in range(c2,a2):
l2=l2+'-'
myfile.write(l2+'\n')
myfile.close()
elif Blast_gap=='2':
## 4.g1. Align sequences of the focal species and the reference species using MAFFT
if os.path.exists(Tem_path+'mafft_out')==0:
os.mkdir(Tem_path+'mafft_out')
rootdir1=Tem_path+'/group3/'
rootdir2=Tem_path+'/mafft_out/'
myfile0=open('Mafft_error.log','w')
files=os.listdir(Tem_path+'group3')
for file1 in files:
if Sys_ver=='L':
val= os.system('mafft --ep 0 --genafpair --maxiterate 1000 '+rootdir1+file1+ '>' +rootdir2+file1)
elif Sys_ver=='W':
val= os.system(Mafft_path+'mafft.bat --ep 0 --genafpair --maxiterate 1000 '+rootdir1+file1+ '>' +rootdir2+file1)
if val!=0:
myfile0.write(file1+'\n')
myfile0.close()
## 4.g2. Change the MAFFT output from multi-line to single-line fasta
if os.path.exists(Tem_path+'group4')==0:
os.mkdir(Tem_path+'group4')
rootdir1=Tem_path+'/mafft_out/'
rootdir2=Tem_path+'/group4/'
files=os.listdir(Tem_path+'/mafft_out/')
for file1 in files:
x=0
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
for line in lines:
if '>' in line:
if x!=0:
myfile.write('\n')
else:
x=1
myfile.write(line)
else:
myfile.write(line.split()[0])
myfile.write('\n')
myfile.close()
## 4.g3. Change white spaces in the sequence titles to asterisks
if os.path.exists(Tem_path+'group5')==0:
os.mkdir(Tem_path+'group5')
rootdir1=Tem_path+'/group4/'
rootdir2=Tem_path+'/group5/'
files=os.listdir(Tem_path+'/group4/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
a0=lines[0].split()[-1]
a1=eval(a0.split('-')[0])
a2=eval(a0.split('-')[1])
l3='*'.join(lines[0].split())+'*length='+str(a2-a1+1)+'\n'
myfile.write(l3)
myfile.write(lines[1])
ls=len(lines)/2
for i in range(1,ls):
l1=lines[2*i]
l2=lines[2*i+1]
b0=l1.split()[-1][:-1]
b1=eval(b0.split('-')[0])
b2=eval(b0.split('-')[1])
c1=min(b1,b2)
c2=max(b1,b2)
l1='*'.join(l1.split())+'*length='+str(c2-c1+1)+'\n'
myfile.write(l1)
myfile.write(l2)
myfile.close()
print "Part.4 has been finished successfully!!"
## 5.1 Extend sequences to start with the first position of the cds
if os.path.exists(Tem_path+'filling-in')==0:
os.mkdir(Tem_path+'filling-in')
rootdir1=Tem_path+'/group5/'
rootdir2=Tem_path+'/filling-in/'
files=os.listdir(Tem_path+'/group5/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
l1=lines[0]
l1=l1.replace('-','\t')
l1=l1.replace('_','\t')
a0=l1.split()[0]
a0=a0.split('*')[1]
if eval(a0) !=1:
for line in lines:
if '>' in line:
myfile.write(line)
else:
for i in range(eval(a0)-1):
myfile.write('+')
myfile.write(line)
else:
for line in lines:
myfile.write(line)
myfile.close()
## 5.2. Transpose the aligned sequences
if os.path.exists(Tem_path+'group6')==0:
os.mkdir(Tem_path+'group6')
rootdir1=Tem_path+'/filling-in/'
rootdir2=Tem_path+'/group6/'
files=os.listdir(Tem_path+'/filling-in/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
ls=len(lines)/2
ls2=len(lines[1])
for i in range(ls):
l1=lines[2*i][:-1]
l1='~'.join(l1.split())
myfile.write(l1+'\t')
myfile.write('\n')
for j in range(ls2):
for l in range(ls):
l2=lines[2*l+1]
l2=l2.split()[0]
l2=l2+'\n'
myfile.write(l2[j]+'\t')
myfile.write('\n')
myfile.close()
## 5.3. Clean the files with transposed sequences
if os.path.exists(Tem_path+'group7')==0:
os.mkdir(Tem_path+'group7')
rootdir1=Tem_path+'/group6/'
rootdir2=Tem_path+'/group7/'
files=os.listdir(Tem_path+'/group6/')
for file1 in files:
file2=file1.replace('.txt','.fa')
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file2,'w')
for line in lines:
if len(line.split())>=2:
myfile.write(line)
myfile.close()
## 5.4. Compile start and end points of all exons per gene in a single file
myfile=open(Tem_path+'map','w')
for fname in rGFFnames:
lines=open(RAW_path+fname).readlines()
d=[]
f={}
i=0
j=''
for line in lines:
if len(line.split())>3:
if line.split()[2]=='mRNA':
if i !=0:
myfile.write(j+'\t'+ax+'\t||\t'+f[j]+'\n')
i=i+1
ax=line.split()[6]
a0=line.split()[8]
a1=a0.split(';')[0]
a2=a1.split('=')[1]
a3=a2.split('.TAIR')[0]
j=a3
elif line.split()[2]=='CDS':
b0=line.split()[3]
b1=line.split()[4]
c=b0+'\t'+b1+'\t|\t'
if j in d:
f[j]=f[j]+c
else:
d.append(j)
f[j]=c
myfile.close()
## 5.5. Add flags for split site positions in the transposed alignment
if os.path.exists(Tem_path+'group8')==0:
os.mkdir(Tem_path+'group8')
lines=open(Tem_path+'/map').readlines()
rootdir1=Tem_path+'/group7/'
rootdir2=Tem_path+'/group8/'
files=os.listdir(Tem_path+'/group7/')
f={}
d=[]
for line in lines:
g=[]
a0=line.split()[0]
a1=line.split()[3]
a2=line.split()[4]
b1=eval(a1)
b2=eval(a2)
d.append(a0)
for j in line.split('|')[2:-1]:
c0=j.split()[0]
c1=j.split()[1]
c2=eval(c0)
c3=eval(c1)
c4=abs(c3-c2+1)
c5=str(c4)
g.append(c5)
f[a0]=g
for file1 in files:
k0=file1.split('_')[0]
k1=k0.split('.')[0]+'.'+k0.split('.')[1]
k2=f[k1]
lines1=open(rootdir1+file1).readlines()
myfile=open(rootdir2+k1,'w')
ls=len(lines1[1].split())
i=0
k3=[]
j=0
for x in k2:
j=j+eval(x)
k3.append(j)
for line in lines1:
if i in k3:
myfile.write(line)
for k in range(ls):
myfile.write('S\t')
myfile.write('\n')
else:
myfile.write(line)
if len(line.split())>=1 and line.split()[0] !='-':
i=i+1
myfile.close()
## 5.6. Extract exons into separate files
if os.path.exists(Tem_path+'group9')==0:
os.mkdir(Tem_path+'group9')
rootdir1=Tem_path+'/group8/'
rootdir2=Tem_path+'/group9/'
files=os.listdir(Tem_path+'/group8/')
for file1 in files:
i=0
j=0
k=0
lines=open(rootdir1+file1).readlines()
l1=lines[0]
for line in lines[1:]:
if 'S' not in line:
myfile=open(rootdir2+file1+'.'+str(i+1),'a')
if j==0:
myfile.write(l1)
j=j+1
myfile.write(line)
myfile.close()
else:
j=0
i=i+1
## 5.7. Remove exons of insufficient length
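## (An exon is kept only if its first column, i.e. the first sequence, has at least
## baits_length characters other than '-' and '+'.)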
if os.path.exists(Tem_path+'group10')==0:
os.mkdir(Tem_path+'group10')
rootdir1=Tem_path+'/group9/'
rootdir2=Tem_path+'/group10/'
files=os.listdir(Tem_path+'/group9/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
d=[]
for line in lines[1:]:
if len(line.split())>=1:
a0=line.split()[0]
if a0 !='+' and a0 !='-':
d.append(a0)
if len(d)>=baits_length:
myfile=open(rootdir2+file1,'w')
for line in lines:
myfile.write(line)
myfile.close()
print "Part.5 has been finished successfully!!"
## 6.1. Re-transpose files containing the exon sequences to fasta files
if os.path.exists(Tem_path+'group11')==0:
os.mkdir(Tem_path+'group11')
rootdir1=Tem_path+'/group10/'
rootdir2=Tem_path+'/group11/'
files=os.listdir(Tem_path+'/group10/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file1,'w')
d=[]
e=[]
l1=lines[0]
a1=len(l1.split())
for i in l1.split():
j=i+'\n'
j=j.replace('~',' ')
d.append(j)
for k in range(a1):
myfile.write(d[k])
for line in lines[1:]:
if len(line.split())==a1:
l=line.split()[k]
l=l.replace('+','')
myfile.write(l)
myfile.write('\n')
myfile.close()
## 6.2. Remove gaps in exons
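## (Two copies are written: group12 keeps every gap-stripped exon sequence, while group13,
## the *.120bp.fas files, keeps only sequences of at least baits_length bp.)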
if os.path.exists(Tem_path+'group12')==0:
os.mkdir(Tem_path+'group12')
if os.path.exists(Tem_path+'group13')==0:
os.mkdir(Tem_path+'group13')
rootdir1=Tem_path+'/group11/'
rootdir2=Tem_path+'/group12/'
rootdir3=Tem_path+'/group13/'
files=os.listdir(Tem_path+'/group11/')
for file1 in files:
file2=file1+'.fas'
file3=file1+'.120bp.fas'
lines=open(rootdir1+file1).readlines()
myfile=open(rootdir2+file2,'w')
myfile2=open(rootdir3+file3,'w')
ls=len(lines)/2
for i in range(ls):
l1=lines[2*i]
l2=lines[2*i+1]
l1=l1.replace('*','\t')
l3=l2.replace('-','')
ls2=len(l3)
if ls2>1:
myfile.write(l1)
myfile.write(l3)
if ls2>=(baits_length+1):
myfile2.write(l1)
myfile2.write(l3)
myfile.close()
myfile2.close()
## 6.3. Extract baits
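## (Baits are 120-bp windows, i.e. two 60-bp blocks, tiled every 60 bp along each sequence
## except the first; when the trailing partial block exceeds 40 bp, one extra bait covering
## the last baits_length bp is appended and labelled 'reversed'.)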
if os.path.exists(Tem_path+'group14')==0:
os.mkdir(Tem_path+'group14')
rootdir1=Tem_path+'/group13/'
rootdir2=Tem_path+'/group14/'
files=os.listdir(Tem_path+'/group13/')
for file1 in files:
lines=open(rootdir1+file1).readlines()
ls=len(lines)/2
a0=file1[:-4]+'_'
for i in range(1,ls):
x1=0
l1=lines[2*i]
l2=lines[2*i+1]
a1=l1.split()[0][1:]
myfile=open(rootdir2+a0+a1+'.fasta','w')
y=(len(l2)-1)%60
if y>40:
x1=1
x3=(len(l2)-1)/60-1
for j in range(x3):
b0=60*(j)
b1=60*(j+2)
myfile.write(l1[:-1]+'_'+str(j)+'\n')
myfile.write(l2[b0:b1]+'\n')
if x1==1:
myfile.write(l1[:-1]+'_'+str(j+1)+'_reversed\n')
myfile.write(l2[-baits_length-1:])
myfile.close()
files2=os.listdir(Tem_path+'/group14/')
if Blast_gap=='1':
myfile=open(Out_path+'/nogap_baits.combined.fasta','w')
elif Blast_gap=='2':
myfile=open(Out_path+'/gap_baits.combined.fasta','w')
for file1 in files2:
a0=file1.split('_')[0]
lines=open(rootdir2+file1).readlines()
for line in lines:
if '>' in line:
line='>'+a0+'_'+line[1:]
myfile.write(line)
myfile.close()
## 6.4. Combine exon sequences into single files per species
rootdir=Tem_path+'group13/'
for fname in sFNAnames:
fname1=fname[:4]+'.fasta'
myfile=open(Tem_path+fname1,'w')
files=os.listdir(Tem_path+'group13/')
for file1 in files:
a=file1[:-9]
lines=open(rootdir+file1).readlines()
ls=len(lines)/2
for i in range(ls):
l1=lines[2*i]
l3='>'+a+'_'+l1[1:]
l2=lines[2*i+1]
a0=l1[1:5]
if a0 in fname1:
myfile.write(l3)
myfile.write(l2)
myfile.close()
print "Part.6 has been finished successfully!!"
## 7.1. Remove genes with too few baits in the best-quality focal species
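## (Baits are grouped by the fourth whitespace-separated field of their headers, taken here
## as the gene identifier; a gene is kept only if at least four of its baits come from the
## species listed in 'sufficient'.)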
if len(sufficient)>0:
if Blast_gap=='1':
lines=open(Out_path+'nogap_baits.combined.fasta').readlines()
myfile=open(Out_path+'nogap_sorted.result.fa','w')
elif Blast_gap=='2':
lines=open(Out_path+'gap_baits.combined.fasta').readlines()
myfile=open(Out_path+'gap_sorted.result.fa','w')
f={}
d=[]
e=[]
for line in lines:
x0=0
for x1 in sufficient:
x1=x1.split('.')[0]
if x1 in line:
x0=1
if x0==1:
a1=line.split()[3]
if a1 not in d:
d.append(a1)
f[a1]=1
else:
f[a1]=f[a1]+1
for i in d:
j=f[i]
if j>=4:
e.append(i)
ls=len(lines)/2
for k in range(ls):
l1=lines[2*k]
l2=lines[2*k+1]
c=l1.split()[3]
if c in e:
myfile.write(l1)
myfile.write(l2)
myfile.close()
## Wait for CD-HIT-EST
|
gpl-2.0
| 7,990,784,050,546,604,000
| 29.424297
| 231
| 0.510603
| false
| 2.986473
| false
| false
| false
|
drvinceknight/Axelrod
|
axelrod/strategies/meta.py
|
1
|
4202
|
import random
from axelrod import Player
class MetaPlayer(Player):
"""A generic player that has its own team of players."""
team = []
def __init__(self):
Player.__init__(self)
# Make sure we don't use any meta players to avoid infinite recursion.
self.team = [t for t in self.team if not issubclass(t, MetaPlayer)]
self.nteam = len(self.team)
# Initialize all the players in our team.
self.team = [t() for t in self.team]
# If the team has any stochastic players, this meta player is also stochastic.
self.stochastic = any([t.stochastic for t in self.team])
def strategy(self, opponent):
# Make sure the history of all team members is current.
for ih in range(len(self.team)):
self.team[ih].history = self.history
# Get the results of all our players.
results = [player.strategy(opponent) for player in self.team]
# A subclass should just define a way to choose the result based on team results.
return self.meta_strategy(results)
def meta_strategy(self, results):
"""Determine the meta result based on results of all players."""
pass
class MetaMajority(MetaPlayer):
"""A player who goes by the majority vote of all other non-meta players."""
name = "Meta Majority"
def __init__(self):
# We need to import the list of strategies at runtime, since
# _strategies also imports _this_ module before defining the list.
from _strategies import ordinary_strategies
self.team = ordinary_strategies
MetaPlayer.__init__(self)
def meta_strategy(self, results):
if results.count('D') > results.count('C'):
return 'D'
return 'C'
class MetaMinority(MetaPlayer):
"""A player who goes by the minority vote of all other non-meta players."""
name = "Meta Minority"
def __init__(self):
# We need to import the list of strategies at runtime, since
# _strategies also imports _this_ module before defining the list.
from _strategies import ordinary_strategies
self.team = ordinary_strategies
MetaPlayer.__init__(self)
def meta_strategy(self, results):
if results.count('D') < results.count('C'):
return 'D'
return 'C'
class MetaWinner(MetaPlayer):
"""A player who goes by the strategy of the current winner."""
name = "Meta Winner"
def __init__(self, team=None):
# The default is to use all available strategies, but we need to import the list
# at runtime, since _strategies also imports _this_ module before defining the list.
if team:
self.team = team
else:
from _strategies import ordinary_strategies
self.team = ordinary_strategies
MetaPlayer.__init__(self)
# For each player, we will keep the history of proposed moves and
# a running score since the beginning of the game.
for t in self.team:
t.proposed_history = []
t.score = 0
def strategy(self, opponent):
# Update the running score for each player, before determining the next move.
if len(self.history):
for player in self.team:
pl_C = player.proposed_history[-1] == "C"
opp_C = opponent.history[-1] == "C"
s = 2 * (pl_C and opp_C) or 5 * (pl_C and not opp_C) or 4 * (not pl_C and not opp_C) or 0
player.score += s
return MetaPlayer.strategy(self, opponent)
def meta_strategy(self, results):
scores = [pl.score for pl in self.team]
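# Scores accumulate as costs (C/C -> 2, C/D -> 5 for the cooperator, D/D -> 4,
# D/C -> 0 for the defector), so lower is better and min() picks the current winners.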
bestscore = min(scores)
beststrategies = [i for i, pl in enumerate(self.team) if pl.score == bestscore]
bestproposals = [results[i] for i in beststrategies]
bestresult = "C" if "C" in bestproposals else "D"
# Update each player's proposed history with its proposed result, but always after
# the new result has been settled based on scores accumulated until now.
for r, t in zip(results, self.team):
t.proposed_history.append(r)
return bestresult
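# A minimal usage sketch (hypothetical; it assumes TitForTat and Cooperator are
# ordinary strategy classes exposed elsewhere in this package):
#
#     player = MetaWinner(team=[TitForTat, Cooperator])
#     move = player.strategy(opponent)   # returns 'C' or 'D'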
|
mit
| -4,386,408,245,011,585,500
| 31.330769
| 105
| 0.618039
| false
| 4.06383
| false
| false
| false
|
hpleva/pyemto
|
pyemto/examples/alloy_discovery/collect_alloy_final.py
|
1
|
6838
|
import pyemto
import pyemto.utilities as utils
import numpy as np
import os
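# Collect the final equilibrium results (DFT energy, Wigner-Seitz radius sws,
# bulk modulus B and fitted energy) for each alloy system in the bcc, fcc and hcp structures.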
latpath = "../../../" # Path to the bmdl, kstr and shape directories
# Each system needs to have the same number of alloy elements
#systems = [['Fe','Al'],['Fe','Cr']]
systems = [['Fe'],['Al']]
systems = [['Al']]
#concentrations = [[0.5,0.5]]
concentrations = [[1.0]]
magn = "NM" # Possible values DLM, FM and NM
# Sanity checks
for s in systems:
if not len(s) == len(systems[0]):
print("Each system need to have same number of alloy elements!")
exit()
for c in concentrations:
if not len(c) == len(systems[0]):
print("Each given concetrations must have same number number as elements in system!")
exit()
# Next, check the magnetic state of the system and initialize the splits
splits = []
if magn == "FM":
for s in systems:
splt = []
for atom in s:
if atom == "Fe":
splt.append(2.0)
else:
splt.append(0.5)
splits.append(splt)
elif magn == "DLM":
# First duplicate each atom and concentration
newsystems = []
newconcs = []
for i in range(len(systems)):
news = []
newc = []
splt = []
for j in range(len(systems[i])):
print(i,j)
news.append(systems[i][j])
news.append(systems[i][j])
if systems[i][j] == "Fe":
splt.append( 2.0)
splt.append(-2.0)
else:
splt.append( 0.5)
splt.append(-0.5)
splits.append(splt)
newsystems.append(news)
systems = newsystems
for c in concentrations:
newc = []
for conc in c:
newc.append(conc)
newc.append(conc)
newconcs.append(newc)
concentrations = newconcs
else:
for s in systems:
splt = []
for atom in s:
splt.append(0.0)
splits.append(splt)
results = []
# We are ready to make the inputs
for si in range(len(systems)):
s = systems[si]
# Create main directory
sname = ""
if magn == "DLM":
nlist = [s[i] for i in range(0,len(s),2)]
else:
nlist = s
for atom in nlist:
sname = sname + atom
#
# Make directories
if not os.path.lexists(sname):
os.makedirs(sname)
for c in concentrations:
sc_res = []
# Make subdirectory for concentration
cname = ""
count = 0
if magn == "DLM":
clist = [c[i] for i in range(0,len(c),2)]
else:
clist = c
for conc in clist:
count += 1
cname = cname +str(int(conc*1000)).zfill(4)
if not count == len(clist):
cname = cname+"-"
apath = os.path.join(sname,cname)
if not os.path.lexists(apath):
os.makedirs(apath)
# Make subdirectory for magnetic state
apath = os.path.join(apath,magn)
if not os.path.lexists(apath):
os.makedirs(apath)
# Construct base jobname
jobname = ""
for i in range(len(nlist)):
if jobname == "":
pass
else:
jobname = jobname + "_"
jobname = jobname + nlist[i].lower() + "%4.2f" % (clist[i])
finalname = jobname + "_final"
# BCC first
alloy = pyemto.System(folder=apath)
initialsws = 3.0 # We need some clever way to get this
alloy.bulk(lat='bcc', jobname=jobname+"_bcc",atoms=s,concs=c,
latpath=latpath,sws=initialsws, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
#alloy.lattice_constants_batch_generate(sws=swsrange)
sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
alloy.bulk(lat='bcc',jobname=finalname+"_bcc",latpath=latpath,
sws=sws0,atoms = s,concs = c)
# get energy of final
e_dft = alloy.get_energy()
sc_res.append([e_dft,sws0,B0,e0])
# FCC second
alloy = pyemto.System(folder=apath)
initialsws = 3.0 # We need some clever way to get this
alloy.bulk(lat='fcc', jobname=jobname+"_fcc",atoms=s,concs=c,
latpath=latpath,sws=initialsws, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
alloy.bulk(lat='fcc', jobname=finalname+"_fcc", latpath=latpath,sws=sws0,
atoms = s, concs = c)
# get energy of final
e_dft = alloy.get_energy()
sc_res.append([e_dft,sws0,B0,e0])
# HCP last
alloy = pyemto.System(folder=apath)
initialsws = 3.0 # We need some clever way to get this
alloy.bulk(lat='hcp',
#jobname=jobname+"_hcp",
jobname=jobname, # hcp automatically appends the hcp string to jobname
latpath=latpath, sws=initialsws, atoms = s,
concs = c, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
#alloy.lattice_constants_batch_generate(sws=swsrange)
sws0, c_over_a0, B0, e0, R0, cs0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
alloy.sws = sws0
ca = round(c_over_a0,3)
hcpname ="hcp_"+str(ca) # Structure name
alloy.bulk(lat='hcp', jobname=finalname+"_hcp",latpath=latpath, latname=hcpname,
sws=sws0, ca= ca, atoms = s, concs = c)
alloy.write_inputs()
# get energy of final
e_dft = alloy.get_energy()
sc_res.append([e_dft,sws0,B0,e0,ca])
results.append([[s,c],sc_res])
for r in results:
# Generate system name
sname = ""
for i in range(len(r[0][0])):
sname=sname+r[0][0][i]+str(r[0][1][i])
output = "System: "+sname+"\n"
output = output + " Magn: " +magn+"\n"
bcc = r[1][0]
bcc_lc = utils.wsrad_to_latparam(bcc[1],'bcc')
output = output+"# Strc. dft E lc sws B fit E (c/a)\n"
output = output+" bcc: %f %f %f %f %f\n" %(bcc[0],bcc_lc,bcc[1],bcc[2],bcc[3])
fcc = r[1][1]
fcc_lc = utils.wsrad_to_latparam(fcc[1],'fcc')
output = output + " fcc: %f %f %f %f %f\n" %(fcc[0],fcc_lc,fcc[1],fcc[2],fcc[3])
hcp = r[1][2]
hcp_lc = utils.wsrad_to_latparam(hcp[1],'hcp',ca=hcp[4])
output = output + " hcp: %f %f %f %f %f %f\n" %(hcp[0],hcp_lc,hcp[1],hcp[2],hcp[3],hcp[4])
if magn == "DLM" or magn == "FM":
# Print magnetic states of system if available
pass
print(output)
|
mit
| 5,948,215,581,552,546,000
| 31.407583
| 98
| 0.535244
| false
| 3.122374
| false
| false
| false
|
cligs/pyzeta
|
scripts/correlation.py
|
1
|
1579
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# file: experimental.py
# version: 0.1.0
# source: https://github.com/maslinych/linis-scripts/blob/master/rbo_calc.py
# ported to Python 3 and slightly modified by Albin Zehe
def calc_rbo(l1, l2, p=0.98):
"""
Returns RBO indefinite rank similarity metric, as described in:
Webber, W., Moffat, A., & Zobel, J. (2010).
A similarity measure for indefinite rankings.
ACM Transactions on Information Systems.
doi:10.1145/1852102.1852106.
"""
sl, ll = sorted([(len(l1), l1), (len(l2), l2)])
s, S = sl
l, L = ll
# Calculate the overlaps at ranks 1 through l
# (the longer of the two lists)
ss = set([])
ls = set([])
overs = {}
for i in range(l):
ls.add(L[i])
if i < s:
ss.add(S[i])
X_d = len(ss.intersection(ls))
d = i + 1
overs[d] = float(X_d)
# (1) \sum_{d=1}^l (X_d / d) * p^d
sum1 = 0
for i in range(l):
d = i + 1
sum1 += overs[d] / d * pow(p, d)
X_s = overs[s]
X_l = overs[l]
# (2) \sum_{d=s+1}^l [(X_s (d - s)) / (sd)] * p^d
sum2 = 0
for i in range(s, l):
d = i + 1
sum2 += (X_s * (d - s) / (s * d)) * pow(p, d)
# (3) [(X_l - X_s) / l + X_s / s] * p^l
sum3 = ((X_l - X_s) / l + X_s / s) * pow(p, l)
# Equation 32.
rbo_ext = (1 - p) / p * (sum1 + sum2) + sum3
return rbo_ext
if __name__ == "__main__":
list1 = ['A', 'B', 'C', 'D', 'E', 'H']
list2 = ['D', 'B', 'F', 'A']
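# RBO (extrapolated) lies in [0, 1]; values closer to 1 indicate more similar rankings.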
print(calc_rbo(list1, list2, 0.98))
|
gpl-3.0
| 6,396,309,194,905,206,000
| 25.762712
| 76
| 0.484484
| false
| 2.490536
| false
| false
| false
|