| code (string, lengths 2-1.05M) | repo_name (string, lengths 5-104) | path (string, lengths 4-251) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import pytest
from hashlib import md5
from marshmallow import ValidationError, missing as missing_
from marshmallow.fields import Int
from marshmallow_jsonapi import Schema
from marshmallow_jsonapi.fields import Str, DocumentMeta, ResourceMeta, Relationship
class TestGenericRelationshipField:
def test_serialize_relationship_link(self, post):
field = Relationship(
"http://example.com/posts/{id}/comments", related_url_kwargs={"id": "<id>"}
)
result = field.serialize("comments", post)
assert field.serialize("comments", post)
related = result["links"]["related"]
assert related == f"http://example.com/posts/{post.id}/comments"
def test_serialize_self_link(self, post):
field = Relationship(
self_url="http://example.com/posts/{id}/relationships/comments",
self_url_kwargs={"id": "<id>"},
)
result = field.serialize("comments", post)
related = result["links"]["self"]
assert "related" not in result["links"]
assert related == "http://example.com/posts/{id}/relationships/comments".format(
id=post.id
)
def test_include_resource_linkage_requires_type(self):
with pytest.raises(ValueError) as excinfo:
Relationship(
related_url="/posts/{post_id}",
related_url_kwargs={"post_id": "<id>"},
include_resource_linkage=True,
)
assert (
excinfo.value.args[0]
== "include_resource_linkage=True requires the type_ argument."
)
def test_include_resource_linkage_single(self, post):
field = Relationship(
related_url="/posts/{post_id}/author/",
related_url_kwargs={"post_id": "<id>"},
include_resource_linkage=True,
type_="people",
)
result = field.serialize("author", post)
assert "data" in result
assert result["data"]
assert result["data"]["id"] == str(post.author.id)
def test_include_resource_linkage_single_with_schema(self, post):
field = Relationship(
related_url="/posts/{post_id}/author/",
related_url_kwargs={"post_id": "<id>"},
include_resource_linkage=True,
type_="people",
schema="PostSchema",
)
result = field.serialize("author", post)
assert "data" in result
assert result["data"]
assert result["data"]["id"] == str(post.author.id)
def test_include_resource_linkage_single_foreign_key(self, post):
field = Relationship(
related_url="/posts/{post_id}/author/",
related_url_kwargs={"post_id": "<id>"},
include_resource_linkage=True,
type_="people",
)
result = field.serialize("author_id", post)
assert result["data"]["id"] == str(post.author_id)
def test_include_resource_linkage_single_foreign_key_with_schema(self, post):
field = Relationship(
related_url="/posts/{post_id}/author/",
related_url_kwargs={"post_id": "<id>"},
include_resource_linkage=True,
type_="people",
schema="PostSchema",
)
result = field.serialize("author_id", post)
assert result["data"]["id"] == str(post.author_id)
def test_include_resource_linkage_id_field_from_string(self):
field = Relationship(
include_resource_linkage=True, type_="authors", id_field="name"
)
result = field.serialize("author", {"author": {"name": "Ray Bradbury"}})
assert "data" in result
assert result["data"]
assert result["data"]["id"] == "Ray Bradbury"
def test_include_resource_linkage_id_field_from_schema(self):
class AuthorSchema(Schema):
id = Str(attribute="name")
class Meta:
type_ = "authors"
strict = True
field = Relationship(
include_resource_linkage=True, type_="authors", schema=AuthorSchema
)
result = field.serialize("author", {"author": {"name": "Ray Bradbury"}})
assert "data" in result
assert result["data"]
assert result["data"]["id"] == "Ray Bradbury"
def test_include_resource_linkage_many(self, post):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=True,
type_="comments",
)
result = field.serialize("comments", post)
assert "data" in result
ids = [each["id"] for each in result["data"]]
assert ids == [str(each.id) for each in post.comments]
def test_include_resource_linkage_many_with_schema(self, post):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=True,
type_="comments",
schema="CommentSchema",
)
result = field.serialize("comments", post)
assert "data" in result
ids = [each["id"] for each in result["data"]]
assert ids == [str(each.id) for each in post.comments]
def test_include_resource_linkage_many_with_schema_overriding_get_attribute(
self, post
):
field = Relationship(
related_url="/posts/{post_id}/keywords",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=True,
type_="keywords",
schema="KeywordSchema",
)
result = field.serialize("keywords", post)
assert "data" in result
ids = [each["id"] for each in result["data"]]
assert ids == [
md5(each.keyword.encode("utf-8")).hexdigest() for each in post.keywords
]
def test_deserialize_data_single(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=False,
include_resource_linkage=True,
type_="comments",
)
value = {"data": {"type": "comments", "id": "1"}}
result = field.deserialize(value)
assert result == "1"
def test_deserialize_data_many(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=True,
type_="comments",
)
value = {"data": [{"type": "comments", "id": "1"}]}
result = field.deserialize(value)
assert result == ["1"]
def test_deserialize_data_missing_id(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=False,
include_resource_linkage=True,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
value = {"data": {"type": "comments"}}
field.deserialize(value)
assert excinfo.value.args[0] == ["Must have an `id` field"]
def test_deserialize_data_missing_type(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=False,
include_resource_linkage=True,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
value = {"data": {"id": "1"}}
field.deserialize(value)
assert excinfo.value.args[0] == ["Must have a `type` field"]
def test_deserialize_data_incorrect_type(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=False,
include_resource_linkage=True,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
value = {"data": {"type": "posts", "id": "1"}}
field.deserialize(value)
assert excinfo.value.args[0] == ["Invalid `type` specified"]
def test_deserialize_null_data_value(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
allow_none=True,
many=False,
include_resource_linkage=False,
type_="comments",
)
result = field.deserialize({"data": None})
assert result is None
def test_deserialize_null_value_disallow_none(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
allow_none=False,
many=False,
include_resource_linkage=False,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
field.deserialize({"data": None})
assert excinfo.value.args[0] == "Field may not be null."
def test_deserialize_empty_data_list(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=False,
type_="comments",
)
result = field.deserialize({"data": []})
assert result == []
def test_deserialize_empty_data(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=False,
include_resource_linkage=False,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
field.deserialize({"data": {}})
assert excinfo.value.args[0] == [
"Must have an `id` field",
"Must have a `type` field",
]
def test_deserialize_required_missing(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
required=True,
many=False,
include_resource_linkage=True,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
field.deserialize(missing_)
assert excinfo.value.args[0] == "Missing data for required field."
def test_deserialize_required_empty(self):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
required=True,
many=False,
include_resource_linkage=False,
type_="comments",
)
with pytest.raises(ValidationError) as excinfo:
field.deserialize({})
assert excinfo.value.args[0] == "Must include a `data` key"
def test_deserialize_many_non_list_relationship(self):
field = Relationship(many=True, include_resource_linkage=True, type_="comments")
with pytest.raises(ValidationError) as excinfo:
field.deserialize({"data": "1"})
assert excinfo.value.args[0] == "Relationship is list-like"
def test_deserialize_non_many_list_relationship(self):
field = Relationship(
many=False, include_resource_linkage=True, type_="comments"
)
with pytest.raises(ValidationError) as excinfo:
field.deserialize({"data": ["1"]})
assert excinfo.value.args[0] == "Relationship is not list-like"
def test_include_null_data_single(self, post_with_null_author):
field = Relationship(
related_url="posts/{post_id}/author",
related_url_kwargs={"post_id": "<id>"},
include_resource_linkage=True,
type_="people",
)
result = field.serialize("author", post_with_null_author)
assert result and result["links"]["related"]
assert result["data"] is None
def test_include_null_data_many(self, post_with_null_comment):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=True,
type_="comments",
)
result = field.serialize("comments", post_with_null_comment)
assert result and result["links"]["related"]
assert result["data"] == []
def test_exclude_data(self, post_with_null_comment):
field = Relationship(
related_url="/posts/{post_id}/comments",
related_url_kwargs={"post_id": "<id>"},
many=True,
include_resource_linkage=False,
type_="comments",
)
result = field.serialize("comments", post_with_null_comment)
assert result and result["links"]["related"]
assert "data" not in result
def test_empty_relationship_with_alternative_identifier_field(
self, post_with_null_author
):
field = Relationship(
related_url="/authors/{author_id}",
related_url_kwargs={"author_id": "<author.last_name>"},
default=None,
)
result = field.serialize("author", post_with_null_author)
assert not result
def test_resource_linkage_id_type_from_schema(self):
class AuthorSchema(Schema):
id = Int(attribute="author_id", as_string=True)
class Meta:
type_ = "authors"
strict = True
field = Relationship(
include_resource_linkage=True, type_="authors", schema=AuthorSchema
)
result = field.deserialize({"data": {"type": "authors", "id": "1"}})
assert result == 1
def test_resource_linkage_id_of_invalid_type(self):
class AuthorSchema(Schema):
id = Int(attribute="author_id", as_string=True)
class Meta:
type_ = "authors"
strict = True
field = Relationship(
include_resource_linkage=True, type_="authors", schema=AuthorSchema
)
with pytest.raises(ValidationError) as excinfo:
field.deserialize({"data": {"type": "authors", "id": "not_a_number"}})
assert excinfo.value.args[0] == "Not a valid integer."
class TestDocumentMetaField:
def test_serialize(self):
field = DocumentMeta()
result = field.serialize(
"document_meta", {"document_meta": {"page": {"offset": 1}}}
)
assert result == {"page": {"offset": 1}}
def test_serialize_incorrect_type(self):
field = DocumentMeta()
with pytest.raises(ValidationError) as excinfo:
field.serialize("document_meta", {"document_meta": 1})
assert excinfo.value.args[0] == "Not a valid mapping type."
def test_deserialize(self):
field = DocumentMeta()
value = {"page": {"offset": 1}}
result = field.deserialize(value)
assert result == value
def test_deserialize_incorrect_type(self):
field = DocumentMeta()
value = 1
with pytest.raises(ValidationError) as excinfo:
field.deserialize(value)
assert excinfo.value.args[0] == "Not a valid mapping type."
class TestResourceMetaField:
def test_serialize(self):
field = ResourceMeta()
result = field.serialize("resource_meta", {"resource_meta": {"active": True}})
assert result == {"active": True}
def test_serialize_incorrect_type(self):
field = ResourceMeta()
with pytest.raises(ValidationError) as excinfo:
field.serialize("resource_meta", {"resource_meta": True})
assert excinfo.value.args[0] == "Not a valid mapping type."
def test_deserialize(self):
field = ResourceMeta()
value = {"active": True}
result = field.deserialize(value)
assert result == value
def test_deserialize_incorrect_type(self):
field = ResourceMeta()
value = True
with pytest.raises(ValidationError) as excinfo:
field.deserialize(value)
assert excinfo.value.args[0] == "Not a valid mapping type."
| marshmallow-code/marshmallow-jsonapi | tests/test_fields.py | Python | mit | 16,428 |
import collections
from syn.base_utils import istr, getfunc
from syn.type.a import Schema, List
from .base import Base, Harvester
from .meta import Attr, pre_create_hook, preserve_attr_data
#-------------------------------------------------------------------------------
# Constants
_LIST = '_list'
#-------------------------------------------------------------------------------
# ListWrapper
class ListWrapper(Base, Harvester):
_attrs = dict(_list = Attr(list, internal=True,
groups=('str_exclude', 'copy_copy'),
doc='The wrapped list'))
_opts = dict(max_len = None,
min_len = None)
def __init__(self, *args, **kwargs):
max_len = self._opts.max_len
ltype = self._attrs.types[_LIST]
_args = self._opts.args
if max_len is None or not _args:
_list = ltype.coerce(list(args), seq_type_only=True)
args = ()
else:
_list = ltype.coerce(list(args[:max_len]), seq_type_only=True)
args = args[max_len:]
_list.extend(kwargs.get(_LIST, ltype.coerce([], seq_type_only=True)))
kwargs[_LIST] = _list
super(ListWrapper, self).__init__(*args, **kwargs)
@pre_create_hook
def _harvest_attrs(clsdata):
getfunc(Harvester._harvest_attrs)(clsdata)
dct = {}
clsdct = clsdata['dct']
attrs = clsdct.get('_attrs', {})
types = clsdct.get('types', [])
schema = clsdct.get('schema', None)
if types and schema:
raise TypeError('Cannot specify both types and schema in {}'
.format(clsdata['clsname']))
if types:
dct['_list'] = Attr(List(tuple(types)))
elif schema:
dct['_list'] = Attr(Schema(schema))
preserve_attr_data(attrs, dct)
attrs.update(dct)
clsdct['_attrs'] = attrs
def _istr_attrs(self, base, pretty, indent):
attrs = super(ListWrapper, self)._istr_attrs(base, pretty, indent)
strs = [istr(val, pretty, indent) for val in self]
ret = base.join(strs)
if ret:
ret = base.join([ret, attrs])
else:
ret = attrs
return ret
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __getitem__(self, item):
return self._list[item]
def __setitem__(self, item, value):
self._list[item] = value
def __delitem__(self, item):
del self._list[item]
@classmethod
def _generate(cls, **kwargs):
_list = []
typ = cls._attrs['_list'].type
kwargs['attrs'] = kwargs.get('attrs', {})
if cls._opts.max_len is not None:
max_len = cls._opts.max_len
if 'max_len' in kwargs:
if kwargs['max_len'] < max_len:
max_len = kwargs['max_len']
else:
max_len = kwargs.get('max_len', cls._opts.min_len)
kwargs_ = dict(kwargs)
if isinstance(typ, Schema):
_list = typ.generate(**kwargs_)
elif cls._opts.min_len:
kwargs_['min_len'] = cls._opts.min_len
kwargs_['max_len'] = max_len
_list = typ.generate(**kwargs_)
kwargs['attrs']['_list'] = _list
return super(ListWrapper, cls)._generate(**kwargs)
def append(self, item):
self._list.append(item)
def count(self, item):
return self._list.count(item)
def extend(self, items):
self._list.extend(items)
def index(self, item):
return self._list.index(item)
def insert(self, index, item):
self._list.insert(index, item)
def pop(self, index=-1):
        return self._list.pop(index)
def remove(self, item):
self._list.remove(item)
def reverse(self):
self._list.reverse()
def sort(self, *args, **kwargs):
self._list.sort(*args, **kwargs)
def validate(self):
super(ListWrapper, self).validate()
max_len = self._opts.max_len
min_len = self._opts.min_len
if max_len is not None:
if len(self._list) > max_len:
raise ValueError("can have at most %d elemsents; (got %d)"
% (max_len, len(self._list)))
if min_len is not None:
if len(self._list) < min_len:
raise ValueError("must have at least %d elemsents; (got %d)"
% (min_len, len(self._list)))
collections.MutableSequence.register(ListWrapper)
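# A hypothetical usage sketch (class name and element type illustrative),
# relying on the `types` hook harvested by _harvest_attrs above:
#
#   class IntList(ListWrapper):
#       types = [int]
#
#   lst = IntList(1, 2, 3)
#   lst.append(4)
#   assert list(lst) == [1, 2, 3, 4]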
#-------------------------------------------------------------------------------
# __all__
__all__ = ('ListWrapper',)
#-------------------------------------------------------------------------------
| mbodenhamer/syn | syn/base/b/wrapper.py | Python | mit | 4,871 |
import calendar
import codecs
import collections
import mmap
import os
import re
import time
import zlib
# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set
# on page 656
def encode_text(s):
return codecs.BOM_UTF16_BE + s.encode("utf_16_be")
PDFDocEncoding = {
0x16: "\u0017",
0x18: "\u02D8",
0x19: "\u02C7",
0x1A: "\u02C6",
0x1B: "\u02D9",
0x1C: "\u02DD",
0x1D: "\u02DB",
0x1E: "\u02DA",
0x1F: "\u02DC",
0x80: "\u2022",
0x81: "\u2020",
0x82: "\u2021",
0x83: "\u2026",
0x84: "\u2014",
0x85: "\u2013",
0x86: "\u0192",
0x87: "\u2044",
0x88: "\u2039",
0x89: "\u203A",
0x8A: "\u2212",
0x8B: "\u2030",
0x8C: "\u201E",
0x8D: "\u201C",
0x8E: "\u201D",
0x8F: "\u2018",
0x90: "\u2019",
0x91: "\u201A",
0x92: "\u2122",
0x93: "\uFB01",
0x94: "\uFB02",
0x95: "\u0141",
0x96: "\u0152",
0x97: "\u0160",
0x98: "\u0178",
0x99: "\u017D",
0x9A: "\u0131",
0x9B: "\u0142",
0x9C: "\u0153",
0x9D: "\u0161",
0x9E: "\u017E",
0xA0: "\u20AC",
}
def decode_text(b):
if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE:
return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be")
else:
return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b)
class PdfFormatError(RuntimeError):
"""An error that probably indicates a syntactic or semantic error in the
PDF file structure"""
pass
def check_format_condition(condition, error_message):
if not condition:
raise PdfFormatError(error_message)
class IndirectReference(
collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"])
):
def __str__(self):
return "%s %s R" % self
def __bytes__(self):
return self.__str__().encode("us-ascii")
def __eq__(self, other):
return (
other.__class__ is self.__class__
and other.object_id == self.object_id
and other.generation == self.generation
)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash((self.object_id, self.generation))
class IndirectObjectDef(IndirectReference):
def __str__(self):
return "%s %s obj" % self
class XrefTable:
def __init__(self):
self.existing_entries = {} # object ID => (offset, generation)
self.new_entries = {} # object ID => (offset, generation)
self.deleted_entries = {0: 65536} # object ID => generation
self.reading_finished = False
def __setitem__(self, key, value):
if self.reading_finished:
self.new_entries[key] = value
else:
self.existing_entries[key] = value
if key in self.deleted_entries:
del self.deleted_entries[key]
def __getitem__(self, key):
try:
return self.new_entries[key]
except KeyError:
return self.existing_entries[key]
def __delitem__(self, key):
if key in self.new_entries:
generation = self.new_entries[key][1] + 1
del self.new_entries[key]
self.deleted_entries[key] = generation
elif key in self.existing_entries:
generation = self.existing_entries[key][1] + 1
self.deleted_entries[key] = generation
elif key in self.deleted_entries:
generation = self.deleted_entries[key]
else:
raise IndexError(
"object ID " + str(key) + " cannot be deleted because it doesn't exist"
)
def __contains__(self, key):
return key in self.existing_entries or key in self.new_entries
def __len__(self):
return len(
set(self.existing_entries.keys())
| set(self.new_entries.keys())
| set(self.deleted_entries.keys())
)
def keys(self):
return (
set(self.existing_entries.keys()) - set(self.deleted_entries.keys())
) | set(self.new_entries.keys())
def write(self, f):
keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys()))
deleted_keys = sorted(set(self.deleted_entries.keys()))
startxref = f.tell()
f.write(b"xref\n")
while keys:
# find a contiguous sequence of object IDs
prev = None
for index, key in enumerate(keys):
if prev is None or prev + 1 == key:
prev = key
else:
contiguous_keys = keys[:index]
keys = keys[index:]
break
else:
contiguous_keys = keys
keys = None
f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys)))
for object_id in contiguous_keys:
if object_id in self.new_entries:
f.write(b"%010d %05d n \n" % self.new_entries[object_id])
else:
this_deleted_object_id = deleted_keys.pop(0)
check_format_condition(
object_id == this_deleted_object_id,
"expected the next deleted object ID to be %s, instead found %s"
% (object_id, this_deleted_object_id),
)
try:
next_in_linked_list = deleted_keys[0]
except IndexError:
next_in_linked_list = 0
f.write(
b"%010d %05d f \n"
% (next_in_linked_list, self.deleted_entries[object_id])
)
return startxref
class PdfName:
def __init__(self, name):
if isinstance(name, PdfName):
self.name = name.name
elif isinstance(name, bytes):
self.name = name
else:
self.name = name.encode("us-ascii")
def name_as_str(self):
return self.name.decode("us-ascii")
def __eq__(self, other):
return (
isinstance(other, PdfName) and other.name == self.name
) or other == self.name
def __hash__(self):
return hash(self.name)
def __repr__(self):
return "PdfName(%s)" % repr(self.name)
@classmethod
def from_pdf_stream(cls, data):
return cls(PdfParser.interpret_name(data))
allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"}
def __bytes__(self):
result = bytearray(b"/")
for b in self.name:
if b in self.allowed_chars:
result.append(b)
else:
result.extend(b"#%02X" % b)
return bytes(result)
class PdfArray(list):
def __bytes__(self):
return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]"
class PdfDict(collections.UserDict):
def __setattr__(self, key, value):
if key == "data":
collections.UserDict.__setattr__(self, key, value)
else:
self[key.encode("us-ascii")] = value
def __getattr__(self, key):
try:
value = self[key.encode("us-ascii")]
except KeyError as e:
raise AttributeError(key) from e
if isinstance(value, bytes):
value = decode_text(value)
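        # PDF date strings look like D:YYYYMMDDHHmmSSOHH'mm' (O is "+", "-" or
        # "Z"); the code below parses the timestamp and folds the offset into UTC.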
if key.endswith("Date"):
if value.startswith("D:"):
value = value[2:]
relationship = "Z"
if len(value) > 17:
relationship = value[14]
offset = int(value[15:17]) * 60
if len(value) > 20:
offset += int(value[18:20])
format = "%Y%m%d%H%M%S"[: len(value) - 2]
value = time.strptime(value[: len(format) + 2], format)
if relationship in ["+", "-"]:
offset *= 60
if relationship == "+":
offset *= -1
value = time.gmtime(calendar.timegm(value) + offset)
return value
def __bytes__(self):
out = bytearray(b"<<")
for key, value in self.items():
if value is None:
continue
value = pdf_repr(value)
out.extend(b"\n")
out.extend(bytes(PdfName(key)))
out.extend(b" ")
out.extend(value)
out.extend(b"\n>>")
return bytes(out)
class PdfBinary:
def __init__(self, data):
self.data = data
def __bytes__(self):
return b"<%s>" % b"".join(b"%02X" % b for b in self.data)
class PdfStream:
def __init__(self, dictionary, buf):
self.dictionary = dictionary
self.buf = buf
def decode(self):
try:
filter = self.dictionary.Filter
except AttributeError:
return self.buf
if filter == b"FlateDecode":
try:
expected_length = self.dictionary.DL
except AttributeError:
expected_length = self.dictionary.Length
return zlib.decompress(self.buf, bufsize=int(expected_length))
else:
raise NotImplementedError(
"stream filter %s unknown/unsupported" % repr(self.dictionary.Filter)
)
def pdf_repr(x):
if x is True:
return b"true"
elif x is False:
return b"false"
elif x is None:
return b"null"
elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)):
return bytes(x)
elif isinstance(x, int):
return str(x).encode("us-ascii")
elif isinstance(x, time.struct_time):
return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")"
elif isinstance(x, dict):
return bytes(PdfDict(x))
elif isinstance(x, list):
return bytes(PdfArray(x))
elif isinstance(x, str):
return pdf_repr(encode_text(x))
elif isinstance(x, bytes):
# XXX escape more chars? handle binary garbage
x = x.replace(b"\\", b"\\\\")
x = x.replace(b"(", b"\\(")
x = x.replace(b")", b"\\)")
return b"(" + x + b")"
else:
return bytes(x)
class PdfParser:
"""Based on
https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf
Supports PDF up to 1.4
"""
def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"):
if buf and f:
raise RuntimeError("specify buf or f or filename, but not both buf and f")
self.filename = filename
self.buf = buf
self.f = f
self.start_offset = start_offset
self.should_close_buf = False
self.should_close_file = False
if filename is not None and f is None:
self.f = f = open(filename, mode)
self.should_close_file = True
if f is not None:
self.buf = buf = self.get_buf_from_file(f)
self.should_close_buf = True
if not filename and hasattr(f, "name"):
self.filename = f.name
self.cached_objects = {}
if buf:
self.read_pdf_info()
else:
self.file_size_total = self.file_size_this = 0
self.root = PdfDict()
self.root_ref = None
self.info = PdfDict()
self.info_ref = None
self.page_tree_root = {}
self.pages = []
self.orig_pages = []
self.pages_ref = None
self.last_xref_section_offset = None
self.trailer_dict = {}
self.xref_table = XrefTable()
self.xref_table.reading_finished = True
if f:
self.seek_end()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False # do not suppress exceptions
def start_writing(self):
self.close_buf()
self.seek_end()
def close_buf(self):
try:
self.buf.close()
except AttributeError:
pass
self.buf = None
def close(self):
if self.should_close_buf:
self.close_buf()
if self.f is not None and self.should_close_file:
self.f.close()
self.f = None
def seek_end(self):
self.f.seek(0, os.SEEK_END)
def write_header(self):
self.f.write(b"%PDF-1.4\n")
def write_comment(self, s):
self.f.write(("% {}\n".format(s)).encode("utf-8"))
def write_catalog(self):
self.del_root()
self.root_ref = self.next_object_id(self.f.tell())
self.pages_ref = self.next_object_id(0)
self.rewrite_pages()
self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref)
self.write_obj(
self.pages_ref,
Type=PdfName(b"Pages"),
Count=len(self.pages),
Kids=self.pages,
)
return self.root_ref
def rewrite_pages(self):
pages_tree_nodes_to_delete = []
for i, page_ref in enumerate(self.orig_pages):
page_info = self.cached_objects[page_ref]
del self.xref_table[page_ref.object_id]
pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")])
if page_ref not in self.pages:
# the page has been deleted
continue
# make dict keys into strings for passing to write_page
stringified_page_info = {}
for key, value in page_info.items():
# key should be a PdfName
stringified_page_info[key.name_as_str()] = value
stringified_page_info["Parent"] = self.pages_ref
new_page_ref = self.write_page(None, **stringified_page_info)
for j, cur_page_ref in enumerate(self.pages):
if cur_page_ref == page_ref:
# replace the page reference with the new one
self.pages[j] = new_page_ref
# delete redundant Pages tree nodes from xref table
for pages_tree_node_ref in pages_tree_nodes_to_delete:
while pages_tree_node_ref:
pages_tree_node = self.cached_objects[pages_tree_node_ref]
if pages_tree_node_ref.object_id in self.xref_table:
del self.xref_table[pages_tree_node_ref.object_id]
pages_tree_node_ref = pages_tree_node.get(b"Parent", None)
self.orig_pages = []
def write_xref_and_trailer(self, new_root_ref=None):
if new_root_ref:
self.del_root()
self.root_ref = new_root_ref
if self.info:
self.info_ref = self.write_obj(None, self.info)
start_xref = self.xref_table.write(self.f)
num_entries = len(self.xref_table)
trailer_dict = {b"Root": self.root_ref, b"Size": num_entries}
if self.last_xref_section_offset is not None:
trailer_dict[b"Prev"] = self.last_xref_section_offset
if self.info:
trailer_dict[b"Info"] = self.info_ref
self.last_xref_section_offset = start_xref
self.f.write(
b"trailer\n"
+ bytes(PdfDict(trailer_dict))
+ b"\nstartxref\n%d\n%%%%EOF" % start_xref
)
def write_page(self, ref, *objs, **dict_obj):
if isinstance(ref, int):
ref = self.pages[ref]
if "Type" not in dict_obj:
dict_obj["Type"] = PdfName(b"Page")
if "Parent" not in dict_obj:
dict_obj["Parent"] = self.pages_ref
return self.write_obj(ref, *objs, **dict_obj)
def write_obj(self, ref, *objs, **dict_obj):
f = self.f
if ref is None:
ref = self.next_object_id(f.tell())
else:
self.xref_table[ref.object_id] = (f.tell(), ref.generation)
f.write(bytes(IndirectObjectDef(*ref)))
stream = dict_obj.pop("stream", None)
if stream is not None:
dict_obj["Length"] = len(stream)
if dict_obj:
f.write(pdf_repr(dict_obj))
for obj in objs:
f.write(pdf_repr(obj))
if stream is not None:
f.write(b"stream\n")
f.write(stream)
f.write(b"\nendstream\n")
f.write(b"endobj\n")
return ref
def del_root(self):
if self.root_ref is None:
return
del self.xref_table[self.root_ref.object_id]
del self.xref_table[self.root[b"Pages"].object_id]
@staticmethod
def get_buf_from_file(f):
if hasattr(f, "getbuffer"):
return f.getbuffer()
elif hasattr(f, "getvalue"):
return f.getvalue()
else:
try:
return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
except ValueError: # cannot mmap an empty file
return b""
def read_pdf_info(self):
self.file_size_total = len(self.buf)
self.file_size_this = self.file_size_total - self.start_offset
self.read_trailer()
self.root_ref = self.trailer_dict[b"Root"]
self.info_ref = self.trailer_dict.get(b"Info", None)
self.root = PdfDict(self.read_indirect(self.root_ref))
if self.info_ref is None:
self.info = PdfDict()
else:
self.info = PdfDict(self.read_indirect(self.info_ref))
check_format_condition(b"Type" in self.root, "/Type missing in Root")
check_format_condition(
self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog"
)
check_format_condition(b"Pages" in self.root, "/Pages missing in Root")
check_format_condition(
isinstance(self.root[b"Pages"], IndirectReference),
"/Pages in Root is not an indirect reference",
)
self.pages_ref = self.root[b"Pages"]
self.page_tree_root = self.read_indirect(self.pages_ref)
self.pages = self.linearize_page_tree(self.page_tree_root)
# save the original list of page references
# in case the user modifies, adds or deletes some pages
# and we need to rewrite the pages and their list
self.orig_pages = self.pages[:]
def next_object_id(self, offset=None):
try:
# TODO: support reuse of deleted objects
reference = IndirectReference(max(self.xref_table.keys()) + 1, 0)
except ValueError:
reference = IndirectReference(1, 0)
if offset is not None:
self.xref_table[reference.object_id] = (offset, 0)
return reference
delimiter = br"[][()<>{}/%]"
delimiter_or_ws = br"[][()<>{}/%\000\011\012\014\015\040]"
whitespace = br"[\000\011\012\014\015\040]"
whitespace_or_hex = br"[\000\011\012\014\015\0400-9a-fA-F]"
whitespace_optional = whitespace + b"*"
whitespace_mandatory = whitespace + b"+"
newline_only = br"[\r\n]+"
newline = whitespace_optional + newline_only + whitespace_optional
re_trailer_end = re.compile(
whitespace_mandatory
+ br"trailer"
+ whitespace_optional
+ br"\<\<(.*\>\>)"
+ newline
+ br"startxref"
+ newline
+ br"([0-9]+)"
+ newline
+ br"%%EOF"
+ whitespace_optional
+ br"$",
re.DOTALL,
)
re_trailer_prev = re.compile(
whitespace_optional
+ br"trailer"
+ whitespace_optional
+ br"\<\<(.*?\>\>)"
+ newline
+ br"startxref"
+ newline
+ br"([0-9]+)"
+ newline
+ br"%%EOF"
+ whitespace_optional,
re.DOTALL,
)
def read_trailer(self):
search_start_offset = len(self.buf) - 16384
if search_start_offset < self.start_offset:
search_start_offset = self.start_offset
m = self.re_trailer_end.search(self.buf, search_start_offset)
check_format_condition(m, "trailer end not found")
# make sure we found the LAST trailer
last_match = m
while m:
last_match = m
m = self.re_trailer_end.search(self.buf, m.start() + 16)
if not m:
m = last_match
trailer_data = m.group(1)
self.last_xref_section_offset = int(m.group(2))
self.trailer_dict = self.interpret_trailer(trailer_data)
self.xref_table = XrefTable()
self.read_xref_table(xref_section_offset=self.last_xref_section_offset)
if b"Prev" in self.trailer_dict:
self.read_prev_trailer(self.trailer_dict[b"Prev"])
def read_prev_trailer(self, xref_section_offset):
trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset)
m = self.re_trailer_prev.search(
self.buf[trailer_offset : trailer_offset + 16384]
)
check_format_condition(m, "previous trailer not found")
trailer_data = m.group(1)
check_format_condition(
int(m.group(2)) == xref_section_offset,
"xref section offset in previous trailer doesn't match what was expected",
)
trailer_dict = self.interpret_trailer(trailer_data)
if b"Prev" in trailer_dict:
self.read_prev_trailer(trailer_dict[b"Prev"])
re_whitespace_optional = re.compile(whitespace_optional)
re_name = re.compile(
whitespace_optional
+ br"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?="
+ delimiter_or_ws
+ br")"
)
re_dict_start = re.compile(whitespace_optional + br"\<\<")
re_dict_end = re.compile(whitespace_optional + br"\>\>" + whitespace_optional)
@classmethod
def interpret_trailer(cls, trailer_data):
trailer = {}
offset = 0
while True:
m = cls.re_name.match(trailer_data, offset)
if not m:
m = cls.re_dict_end.match(trailer_data, offset)
check_format_condition(
m and m.end() == len(trailer_data),
"name not found in trailer, remaining data: "
+ repr(trailer_data[offset:]),
)
break
key = cls.interpret_name(m.group(1))
value, offset = cls.get_value(trailer_data, m.end())
trailer[key] = value
check_format_condition(
b"Size" in trailer and isinstance(trailer[b"Size"], int),
"/Size not in trailer or not an integer",
)
check_format_condition(
b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference),
"/Root not in trailer or not an indirect reference",
)
return trailer
re_hashes_in_name = re.compile(br"([^#]*)(#([0-9a-fA-F]{2}))?")
@classmethod
def interpret_name(cls, raw, as_text=False):
name = b""
for m in cls.re_hashes_in_name.finditer(raw):
if m.group(3):
name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii"))
else:
name += m.group(1)
if as_text:
return name.decode("utf-8")
else:
return bytes(name)
re_null = re.compile(whitespace_optional + br"null(?=" + delimiter_or_ws + br")")
re_true = re.compile(whitespace_optional + br"true(?=" + delimiter_or_ws + br")")
re_false = re.compile(whitespace_optional + br"false(?=" + delimiter_or_ws + br")")
re_int = re.compile(
whitespace_optional + br"([-+]?[0-9]+)(?=" + delimiter_or_ws + br")"
)
re_real = re.compile(
whitespace_optional
+ br"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?="
+ delimiter_or_ws
+ br")"
)
re_array_start = re.compile(whitespace_optional + br"\[")
re_array_end = re.compile(whitespace_optional + br"]")
re_string_hex = re.compile(
whitespace_optional + br"\<(" + whitespace_or_hex + br"*)\>"
)
re_string_lit = re.compile(whitespace_optional + br"\(")
re_indirect_reference = re.compile(
whitespace_optional
+ br"([-+]?[0-9]+)"
+ whitespace_mandatory
+ br"([-+]?[0-9]+)"
+ whitespace_mandatory
+ br"R(?="
+ delimiter_or_ws
+ br")"
)
re_indirect_def_start = re.compile(
whitespace_optional
+ br"([-+]?[0-9]+)"
+ whitespace_mandatory
+ br"([-+]?[0-9]+)"
+ whitespace_mandatory
+ br"obj(?="
+ delimiter_or_ws
+ br")"
)
re_indirect_def_end = re.compile(
whitespace_optional + br"endobj(?=" + delimiter_or_ws + br")"
)
re_comment = re.compile(
br"(" + whitespace_optional + br"%[^\r\n]*" + newline + br")*"
)
re_stream_start = re.compile(whitespace_optional + br"stream\r?\n")
re_stream_end = re.compile(
whitespace_optional + br"endstream(?=" + delimiter_or_ws + br")"
)
@classmethod
def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1):
if max_nesting == 0:
return None, None
m = cls.re_comment.match(data, offset)
if m:
offset = m.end()
m = cls.re_indirect_def_start.match(data, offset)
if m:
check_format_condition(
int(m.group(1)) > 0,
"indirect object definition: object ID must be greater than 0",
)
check_format_condition(
int(m.group(2)) >= 0,
"indirect object definition: generation must be non-negative",
)
check_format_condition(
expect_indirect is None
or expect_indirect
== IndirectReference(int(m.group(1)), int(m.group(2))),
"indirect object definition different than expected",
)
object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1)
if offset is None:
return object, None
m = cls.re_indirect_def_end.match(data, offset)
check_format_condition(m, "indirect object definition end not found")
return object, m.end()
check_format_condition(
not expect_indirect, "indirect object definition not found"
)
m = cls.re_indirect_reference.match(data, offset)
if m:
check_format_condition(
int(m.group(1)) > 0,
"indirect object reference: object ID must be greater than 0",
)
check_format_condition(
int(m.group(2)) >= 0,
"indirect object reference: generation must be non-negative",
)
return IndirectReference(int(m.group(1)), int(m.group(2))), m.end()
m = cls.re_dict_start.match(data, offset)
if m:
offset = m.end()
result = {}
m = cls.re_dict_end.match(data, offset)
while not m:
key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
if offset is None:
return result, None
value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
result[key] = value
if offset is None:
return result, None
m = cls.re_dict_end.match(data, offset)
offset = m.end()
m = cls.re_stream_start.match(data, offset)
if m:
try:
stream_len = int(result[b"Length"])
except (TypeError, KeyError, ValueError) as e:
raise PdfFormatError(
"bad or missing Length in stream dict (%r)"
% result.get(b"Length", None)
) from e
stream_data = data[m.end() : m.end() + stream_len]
m = cls.re_stream_end.match(data, m.end() + stream_len)
check_format_condition(m, "stream end not found")
offset = m.end()
result = PdfStream(PdfDict(result), stream_data)
else:
result = PdfDict(result)
return result, offset
m = cls.re_array_start.match(data, offset)
if m:
offset = m.end()
result = []
m = cls.re_array_end.match(data, offset)
while not m:
value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1)
result.append(value)
if offset is None:
return result, None
m = cls.re_array_end.match(data, offset)
return result, m.end()
m = cls.re_null.match(data, offset)
if m:
return None, m.end()
m = cls.re_true.match(data, offset)
if m:
return True, m.end()
m = cls.re_false.match(data, offset)
if m:
return False, m.end()
m = cls.re_name.match(data, offset)
if m:
return PdfName(cls.interpret_name(m.group(1))), m.end()
m = cls.re_int.match(data, offset)
if m:
return int(m.group(1)), m.end()
m = cls.re_real.match(data, offset)
if m:
# XXX Decimal instead of float???
return float(m.group(1)), m.end()
m = cls.re_string_hex.match(data, offset)
if m:
# filter out whitespace
hex_string = bytearray(
[b for b in m.group(1) if b in b"0123456789abcdefABCDEF"]
)
if len(hex_string) % 2 == 1:
# append a 0 if the length is not even - yes, at the end
hex_string.append(ord(b"0"))
return bytearray.fromhex(hex_string.decode("us-ascii")), m.end()
m = cls.re_string_lit.match(data, offset)
if m:
return cls.get_literal_string(data, m.end())
# return None, offset # fallback (only for debugging)
raise PdfFormatError("unrecognized object: " + repr(data[offset : offset + 32]))
re_lit_str_token = re.compile(
br"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))"
)
escaped_chars = {
b"n": b"\n",
b"r": b"\r",
b"t": b"\t",
b"b": b"\b",
b"f": b"\f",
b"(": b"(",
b")": b")",
b"\\": b"\\",
ord(b"n"): b"\n",
ord(b"r"): b"\r",
ord(b"t"): b"\t",
ord(b"b"): b"\b",
ord(b"f"): b"\f",
ord(b"("): b"(",
ord(b")"): b")",
ord(b"\\"): b"\\",
}
@classmethod
def get_literal_string(cls, data, offset):
nesting_depth = 0
result = bytearray()
for m in cls.re_lit_str_token.finditer(data, offset):
result.extend(data[offset : m.start()])
if m.group(1):
result.extend(cls.escaped_chars[m.group(1)[1]])
elif m.group(2):
result.append(int(m.group(2)[1:], 8))
elif m.group(3):
pass
elif m.group(5):
result.extend(b"\n")
elif m.group(6):
result.extend(b"(")
nesting_depth += 1
elif m.group(7):
if nesting_depth == 0:
return bytes(result), m.end()
result.extend(b")")
nesting_depth -= 1
offset = m.end()
raise PdfFormatError("unfinished literal string")
re_xref_section_start = re.compile(whitespace_optional + br"xref" + newline)
re_xref_subsection_start = re.compile(
whitespace_optional
+ br"([0-9]+)"
+ whitespace_mandatory
+ br"([0-9]+)"
+ whitespace_optional
+ newline_only
)
re_xref_entry = re.compile(br"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)")
def read_xref_table(self, xref_section_offset):
subsection_found = False
m = self.re_xref_section_start.match(
self.buf, xref_section_offset + self.start_offset
)
check_format_condition(m, "xref section start not found")
offset = m.end()
while True:
m = self.re_xref_subsection_start.match(self.buf, offset)
if not m:
check_format_condition(
subsection_found, "xref subsection start not found"
)
break
subsection_found = True
offset = m.end()
first_object = int(m.group(1))
num_objects = int(m.group(2))
for i in range(first_object, first_object + num_objects):
m = self.re_xref_entry.match(self.buf, offset)
check_format_condition(m, "xref entry not found")
offset = m.end()
is_free = m.group(3) == b"f"
generation = int(m.group(2))
if not is_free:
new_entry = (int(m.group(1)), generation)
check_format_condition(
i not in self.xref_table or self.xref_table[i] == new_entry,
"xref entry duplicated (and not identical)",
)
self.xref_table[i] = new_entry
return offset
def read_indirect(self, ref, max_nesting=-1):
offset, generation = self.xref_table[ref[0]]
check_format_condition(
generation == ref[1],
"expected to find generation %s for object ID %s in xref table, "
"instead found generation %s at offset %s"
% (ref[1], ref[0], generation, offset),
)
value = self.get_value(
self.buf,
offset + self.start_offset,
expect_indirect=IndirectReference(*ref),
max_nesting=max_nesting,
)[0]
self.cached_objects[ref] = value
return value
def linearize_page_tree(self, node=None):
if node is None:
node = self.page_tree_root
check_format_condition(
node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages"
)
pages = []
for kid in node[b"Kids"]:
kid_object = self.read_indirect(kid)
if kid_object[b"Type"] == b"Page":
pages.append(kid)
else:
pages.extend(self.linearize_page_tree(node=kid_object))
return pages
| sserrot/champion_relationships | venv/Lib/site-packages/PIL/PdfParser.py | Python | mit | 34,422 |
import games
import handlers
import hlib.error
import hlib.input
from handlers import require_login, page
import hruntime # @UnresolvedImport
class Handler(handlers.GenericHandler):
class ValidateProfile(hlib.input.SchemaValidator):
username = hlib.input.Username()
@require_login
@page
@hlib.input.validate_by(schema = ValidateProfile)
def index(self, username = None):
if username not in hruntime.dbroot.users:
raise hlib.error.NoSuchUserError(username)
user = hruntime.dbroot.users[username]
gm = games.game_module('settlers', submodule = 'stats')
return self.generate('profile.mako', params = {'player': user, 'player_stats': gm.stats.player_stats[user.name] if user.name in gm.stats.player_stats else None})
| happz/settlers | src/handlers/profile.py | Python | mit | 757 |
from setuptools import setup
setup(
name='Central',
version='0.6.0',
packages=['central', 'central.config'],
url='https://github.com/viniciuschiele/central',
license='MIT',
author='Vinicius Chiele',
author_email='vinicius.chiele@gmail.com',
description='A dynamic configuration library',
keywords=['config', 'configuration', 'dynamic', 'file', 's3', 'aws', 'storage', 'reload'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
| viniciuschiele/central | setup.py | Python | mit | 863 |
import yaml
import json
from pprint import pprint
with open ("q6yaml.yml") as f:
yaml_data = yaml.load(f)
with open ("q6json.json") as ff:
json_data = json.load(ff)
seperator = "-" * 20
print seperator
pprint("YAML output")
print seperator
pprint(yaml_data)
print seperator
pprint("JSON output")
print seperator
pprint(json_data)
print seperator
| jrgreenberg/jrgreenberg_PyNet | Week1/q7.py | Python | mit | 355 |
#!/usr/bin/env python
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from time import strftime
try:
import configparser
except ImportError:
from six.moves import configparser
import argparse
# Note: You MUST at least enter valid AWS API Keys in this file:
CONFIG_FILE = 'es-s3-snapshot.conf'
# Usage/Setup:
# 1. Put your AWS creds in the CONFIG_FILE file located in the same subdirectory as this file
# (This step is MANDATORY)
#
# 2. Review the other config params in the CONFIG_FILE.
# The default params should be okay in most cases, but take a look to be sure.
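# A hypothetical example of es-s3-snapshot.conf, inferred from the config keys
# read in the functions below (all values are illustrative placeholders):
#
#   [aws_api_keys]
#   aws_access_key = <your-access-key>
#   aws_secret_key = <your-secret-key>
#
#   [aws_s3_config]
#   aws_region = us-east-1
#   s3_bucket_name = my-es-snapshots
#   s3_base_path = backups/
#
#   [elasticsearch_config]
#   es_src_seed1 = src-node-1:9200
#   es_dest_seed1 = dest-node-1:9200
#   es_repository_name = s3_repository
#   index_names = index1,index2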
def snapshot_indices_from_src_to_s3(config):
"""
Take a snapshot of all the indices specified in the config file.
The specified indices are backed up from the ElasticSearch Node on which backup is initiated
and are stored at the S3 location specified in the config file.
Parameters:
config: dictionary storing the configuration details
"""
src_seed1 = config['elasticsearch_config']['es_src_seed1']
es_s3_repo = config['elasticsearch_config']['es_repository_name']
try:
src_seed2 = config['elasticsearch_config']['es_src_seed2']
src_seed3 = config['elasticsearch_config']['es_src_seed3']
except KeyError: # running in test mode? use a single node
print ("\n[WARN] Only one SOURCE seed node found in the config, falling back to single SOURCE seed...")
src_seed2 = src_seed3 = src_seed1
try:
src_es = Elasticsearch([src_seed1, src_seed2, src_seed3], sniff_on_start=True,
sniff_on_connection_fail=True, sniffer_timeout=60)
print ("\n[INFO] Connected to src ES cluster: %s" %(src_es.info()))
src_es.snapshot.create_repository(repository=es_s3_repo,
body={
"type": "s3",
"settings": {
"region": config['aws_s3_config']['aws_region'],
"bucket": config['aws_s3_config']['s3_bucket_name'],
"base_path": config['aws_s3_config']['s3_base_path'],
"access_key": config['aws_api_keys']['aws_access_key'],
"secret_key": config['aws_api_keys']['aws_secret_key']
}
},
request_timeout=30,
verify=False)
print ("\n[INFO] Snapshotting ES indices: '%s' to S3...\n" %(config['elasticsearch_config']['index_names']))
src_es.snapshot.create(repository=es_s3_repo,
snapshot=config['elasticsearch_config']['snapshot_name'],
body={"indices": config['elasticsearch_config']['index_names']},
wait_for_completion=False)
except Exception as e:
print ("\n\n[ERROR] Unexpected error: %s" %(str(e)))
def restore_indices_from_s3_to_dest(config):
"""
Restore the specified indices from the snapshot specified in the config file.
The indices are restored at the specified 'dest' ElasticSearch Node.
ElasticSearch automatically replicates the indices across the ES cluster after the restore.
Parameters:
config: dictionary storing the configuration details
"""
dest_seed1 = config['elasticsearch_config']['es_dest_seed1']
es_s3_repo = config['elasticsearch_config']['es_repository_name']
index_list = config['elasticsearch_config']['index_names'].split(',')
try:
dest_seed2 = config['elasticsearch_config']['es_dest_seed2']
dest_seed3 = config['elasticsearch_config']['es_dest_seed3']
except KeyError: # running in test mode? use a single node
print ("\n[WARN] Are you running in test mode? Have you defined >1 dest node in the conf?")
print ("\n[WARN] Falling back to a single dest node...")
dest_seed2 = dest_seed3 = dest_seed1
try:
# specify all 3 dest ES nodes in the connection string
dest_es = Elasticsearch([dest_seed1, dest_seed2, dest_seed3], sniff_on_start=True,
sniff_on_connection_fail=True, sniffer_timeout=60)
dest_es.snapshot.create_repository(repository=es_s3_repo,
body={
"type": "s3",
"settings": {
"region": config['aws_s3_config']['aws_region'],
"bucket": config['aws_s3_config']['s3_bucket_name'],
"base_path": config['aws_s3_config']['s3_base_path'],
"access_key": config['aws_api_keys']['aws_access_key'],
"secret_key": config['aws_api_keys']['aws_secret_key']
}
},
request_timeout=30,
verify=False)
print ("\n[INFO] Connected to dest ES cluster: %s" %(dest_es.info()))
# must close indices before restoring:
for index in index_list:
try:
print ("[INFO] Closing index: '%s'" %(index))
dest_es.indices.close(index=index, ignore_unavailable=True)
except NotFoundError:
print ("\n\n[WARN] Index '%s' not present on Target ES cluster - could not close it." %(index))
except Exception as e:
print ("\n\n[ERROR] Unexpected error '%s' while trying to close index: '%s'" %(str(e)))
#reopen_indices(dest_es, index_list)
print ("\n[INFO] Restoring ES indices: '%s' from S3 snapshot...\n" %(config['elasticsearch_config']['index_names']))
dest_es.snapshot.restore(repository=es_s3_repo,
snapshot=config['elasticsearch_config']['snapshot_name'],
body={"indices": config['elasticsearch_config']['index_names']},
wait_for_completion=False)
except Exception as e:
print ("\n\n[ERROR] Unexpected error: %s" %(str(e)))
finally:
print ("\n[INFO] (finally) Re-opening indices: '%s'" %(str(index_list)))
reopen_indices(dest_es, index_list)
def reopen_indices(es, index_list):
"""
Re-open indices
(used to ensure indices are re-opened after any restore operation)
Parameters:
es : ElasticSearch connection object
index_list : List of ElasticSearch indices that needs to be open
"""
try:
for index in index_list:
print ("[INFO] reopen_indices(): Opening index: '%s'" %(index))
es.indices.open(index=index, ignore_unavailable=True)
except NotFoundError:
print ("\n\n[WARN] Could not reopen missing index on Target ES cluster: '%s'" %(index))
except Exception as e:
print ("\n\n[ERROR] Unexpected error in reopen_indices(): %s" %(str(e)))
def read_config():
"""
Parse the config file. Return a dictionary object containing the config.
"""
cfg = configparser.ConfigParser()
cfg.read(CONFIG_FILE)
# get a normal dictionary out of the configparser object
config = {section:{k:v for k,v in cfg.items(section)} for section in cfg.sections()}
return config
def main():
# parse command line args
parser = argparse.ArgumentParser(
description='Push specified Elasticsearch indices from SOURCE to DESTINATION as per config in the `es-s3-snapshot.conf` file.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-m', '--mode',
help="Mode of operation. Choose 'backup' on your SOURCE cluster. \
Choose 'restore' on your DESTINATION cluster",
choices=['backup','restore'], required=True)
args = parser.parse_args()
# parse config
config = read_config()
# set default value of snapshot_name if missing from config
    if 'snapshot_name' not in config['elasticsearch_config']:
snapshot_name = 'snapshot-' + strftime("%Y_%m_%dT%H-%M-%S")
config['elasticsearch_config']['snapshot_name'] = snapshot_name
if args.mode == 'backup':
snapshot_indices_from_src_to_s3(config)
if args.mode == 'restore':
restore_indices_from_s3_to_dest(config)
print ('\n\n[All done!]')
if __name__ == "__main__":
main()
| cldcvr/elasticsearch-s3-backup | es-s3-snapshot/es-s3-snapshot.py | Python | mit | 8,107 |
#!/usr/bin/python
import logging
import struct
starttls_modes = {
21: 'ftp',
25: 'smtp',
110: 'pop3',
143: 'imap',
587: 'smtp',
38476: 'pgsql'
}
def starttls(s, port, mode='auto'):
logger = logging.getLogger('pytls')
logger.debug('Using %d, mode %s', port, mode)
if mode == 'auto':
        if port in starttls_modes:
mode = starttls_modes[port]
else:
# No starttls
logger.debug('Not a starttls port')
return
if mode == 'none':
return
logger.debug('Using starttls mode %s', mode)
BUFSIZ = 1024 # Arbitrary
if mode == 'smtp':
s.recv(BUFSIZ)
s.send("EHLO sslchecker.westpoint.ltd.uk\r\n")
s.recv(BUFSIZ)
s.send("STARTTLS\r\n")
s.recv(BUFSIZ)
elif mode == 'pop3':
s.recv(BUFSIZ)
s.send("STLS\r\n")
s.recv(BUFSIZ)
elif mode == 'imap':
s.recv(BUFSIZ)
s.send("A0001 STARTTLS\r\n")
s.recv(BUFSIZ)
elif mode == 'ftp':
s.recv(BUFSIZ)
s.send("AUTH TLS\r\n")
s.recv(BUFSIZ)
elif mode == 'pgsql':
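        # PostgreSQL SSLRequest: message length 8 followed by the magic request
        # code 80877103 (0x04d2162f), asking the server to negotiate TLS.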
msg = struct.pack('BBBBBBBB', 0x00, 0x00, 0x00, 0x08, 0x04, 0xd2, 0x16, 0x2f)
s.send(msg)
s.recv(BUFSIZ)
else:
raise Exception('Unknown starttls mode, %s' % mode)
if __name__ == '__main__':
import sys
import socket
logging.basicConfig(level=logging.DEBUG)
host = sys.argv[1]
port = int(sys.argv[2])
if len(sys.argv) == 4:
mode = sys.argv[3]
else:
mode = 'auto'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.settimeout(5)
s.connect((host, port))
starttls(s, port, mode)
f = s.makefile('rw', 0)
| WestpointLtd/pytls | tls/starttls.py | Python | mit | 1,759 |
"""
Django settings for meal_api project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ii_j#dw+xfv5iu1*mz+v2!!tb9g%$w+^!55&2!s_d2qhpkj=9o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'locations_model',
'rest_framework',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meal_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meal_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Update database configuration with $DATABASE_URL.
import dj_database_url

# DATABASES must be defined before its 'default' entry can be updated below.
DATABASES = {'default': {}}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
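# dj_database_url reads the DATABASE_URL environment variable, for example
# (illustrative value): DATABASE_URL=postgres://user:password@host:5432/dbname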
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'meal-test',
# 'USER': 'roboiris',
# 'PASSWORD': 'secret',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| Seattle-Meal-Maps/seattle-meal-maps-api | meal_api/meal_api/settings.py | Python | mit | 3,895 |
#!/usr/bin/python
#
# Copyright (c) 2011 Rime Project.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from rime.core import commands
class Example(commands.CommandBase):
def __init__(self, parent):
super(Example, self).__init__(
'example',
'',
'Example command.',
'Example help.',
parent)
def Run(self, project, args, ui):
ui.console.Print('Hello, world!')
ui.console.Print()
ui.console.Print('Project:')
ui.console.Print(' %s' % repr(project))
ui.console.Print()
ui.console.Print('Parameters:')
for i, arg in enumerate(args):
ui.console.Print(' args[%s] = "%s"' % (i, arg))
ui.console.Print()
ui.console.Print('Options:')
for key, value in ui.options.items():
ui.console.Print(' options.%s = %s' % (key, value))
commands.registry.Add(Example)
|
AI-comp/Orientation2015Problems
|
rime/plugins/example.py
|
Python
|
mit
| 1,853
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostDisconnectedEvent(vim, *args, **kwargs):
'''This event records a disconnection from a host.'''
obj = vim.client.factory.create('ns0:HostDisconnectedEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'reason', 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
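# A minimal usage sketch (argument values are hypothetical; a live pyvisdk
# `vim` connection object is assumed):
#
#   event = HostDisconnectedEvent(vim,
#                                 chainId=1,
#                                 createdTime='2013-01-01T00:00:00Z',
#                                 key=1,
#                                 userName='root',
#                                 reason='host entered maintenance mode')
#
# Any keyword outside required + optional raises InvalidArgumentError.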
|
xuru/pyvisdk
|
pyvisdk/do/host_disconnected_event.py
|
Python
|
mit
| 1,157
|
"""
circonus.tag
~~~~~~~~~~~~
Manipulate tags on resources that support them.
"""
from circonus.util import get_resource_from_cid
TAGGABLE_RESOURCES = [
"check_bundle",
"contact_group",
"graph",
"maintenance",
"metric_cluster",
"template",
"worksheet"
]
"""Circonus API resources for which tags can be modified."""
TAG_SEP = ":"
def _get_updated_tags(update_function, *args):
"""Get an updated list of tags.
:param update_function: The function used to update tags.
    :param args: The resource :py:class:`dict` to get updated tags for, followed by the tags :py:class:`list` to update it with.
:rtype: :py:class:`list`
If the tag list update function modifies the existing tag list then that new list is returned. In all other cases
None is returned.
"""
updated_tags = None
resource, tags = args[:2]
existing_tags = resource.get("tags")
if existing_tags is not None:
existing_tags_set = set(existing_tags)
tags_set = set(tags)
updated_tags_set = update_function(existing_tags_set, tags_set)
if existing_tags_set != updated_tags_set:
updated_tags = list(updated_tags_set)
return updated_tags
def get_tag_string(tag, category=None):
"""Get a string representing ``tag``.
:param str tag: The tag.
:param str category: (optional) The category.
:rtype: :py:class:`str`
Circonus requires categorized tags to be a string of the form, "category:tag". Uncategorized tags are simply,
"tag".
"""
return TAG_SEP.join([category, tag]) if category else tag
def is_taggable(cid):
"""Is the resource represented by the given cid taggable?
:param str cid: The ``cid`` of a resource that may support tags.
:rtype: :py:class:`bool`
Only resources which support tagging via the Circonus API are considered taggable. Resources which have a
``_tags`` attribute are not considered taggable since the ``_tags`` list cannot be updated via the API -- it is
read-only.
"""
return get_resource_from_cid(cid) in TAGGABLE_RESOURCES
def get_tags_with(resource, tags):
"""Get the list of tags for ``resource`` with ``tags`` added to it.
:param dict resource: The resource with a ``tags`` key.
:param list tags: The tags to add to ``resource``.
:rtype: :py:class:`list` or :py:const:`None`
If ``tags`` changes the existing tags on the resource by adding new tags then that new list of tags is returned.
If ``tags`` does not change the existing tags on the resource then :py:const:`None` is returned.
All other failure states resulting from trying to add the list of tags to ``resource`` will return :py:const:`None`.
"""
return _get_updated_tags(set.union, resource, tags)
def get_tags_without(resource, tags):
"""Get the list of tags for ``resource`` with ``tags`` removed from it.
:param dict resource: The resource with a ``tags`` key.
:param list tags: The tags to remove from ``resource``.
:rtype: :py:class:`list` or :py:const:`None`
    If ``tags`` changes the existing tags on the resource by removing tags then that new list of tags is returned.
If ``tags`` does not change the existing tags on the resource then :py:const:`None` is returned.
    All other failure states resulting from trying to remove the list of tags from ``resource`` will return
:py:const:`None`.
"""
return _get_updated_tags(set.difference, resource, tags)
def get_telemetry_tag(check_bundle):
"""Get a telemetry tag string for ``check_bundle``.
:param dict check_bundle: The check bundle to get a telemetry tag from.
:rtype: :py:class:`str`
If ``check_bundle`` has a ``type`` attribute, a tag of the form "telemetry:type" will be returned. This makes
filtering check bundles by the source of telemetry data easier in the Circonus UI.
"""
return get_tag_string(check_bundle["type"], "telemetry")
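# Behavior sketch (illustrative values):
#
#   get_tag_string("json", "telemetry")                     # -> "telemetry:json"
#   get_tag_string("production")                            # -> "production"
#   get_tags_with({"tags": ["env:prod"]}, ["team:ops"])     # -> list with both tags
#   get_tags_with({"tags": ["env:prod"]}, ["env:prod"])     # -> None (no change)
#   get_tags_without({"tags": ["env:prod"]}, ["env:prod"])  # -> []
#
# Sets are used internally, so the order of a returned tag list is not guaranteed.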
|
monetate/circonus
|
circonus/tag.py
|
Python
|
mit
| 3,972
|
from cloudcast.template import *
from cloudcast.library import stack_user
iSCMCompleteHandle = WaitConditionHandle()
iSCMComplete = WaitCondition(
Handle = iSCMCompleteHandle,
Timeout = "3600" # Be generous with time
)
iSCMData = Output(
Description = "Output provided by the iSCM process",
Value = iSCMComplete["Data"]
)
|
tuxpiper/raduga
|
raduga/cfn/build_stack.cfn.py
|
Python
|
mit
| 332
|
import cards
import hands
import preflop_sim
import afterflop_sim
import pickle
# add more features if we have time
features = {
    'high-pair': 0, 'middle-pair': 1, 'low-pair': 2, '2-pair-good': 3,
    '3-kind': 4, 'straight': 5, 'flush': 6, 'full-house': 7, '4-kind': 8,
    'straight-flush': 9, 'really-good-high': 10, 'good-high': 11,
    'middle-high': 12, 'bad-high': 13, 'really-bad-high': 14, '2-pair-bad': 15,
}
def getHandCode(herohand, table):
handscore = 0
herohandplus = herohand.list_rep() + table.list_rep()
evaluated = hands.evaluate_hand(herohandplus)
if evaluated[2] > 3:
handscore = evaluated[2]
elif evaluated[2] == 3:
high_pair = evaluated[1][0]
highest_card = True
for e in evaluated[1]:
if high_pair < e:
highest_card = False
if highest_card:
handscore = afterflop_sim.features['2-pair-good']
else:
handscore = afterflop_sim.features['2-pair-bad']
elif evaluated[2] == 2:
high_pair = evaluated[1][0]
num_card_greater = 0
for e in evaluated[1]:
if high_pair < e:
num_card_greater += 1
if num_card_greater == 0:
handscore = afterflop_sim.features['high-pair']
elif num_card_greater == 1:
handscore = afterflop_sim.features['middle-pair']
else:
handscore = afterflop_sim.features['low-pair']
elif evaluated[2] == 1:
hand_strength = preflop_sim.getPreflopStrength(herohand)
        # float() guards against Python 2 integer division when the counts are ints
        win_ratio = float(hand_strength[0]) / (hand_strength[0] + hand_strength[2])
if win_ratio > afterflop_sim.REALLYGOODHAND:
handscore = features['really-good-high']
elif win_ratio > afterflop_sim.GOODHAND:
handscore = features['good-high']
elif win_ratio > afterflop_sim.MIDDLEHAND:
handscore = features['middle-high']
elif win_ratio > afterflop_sim.BADHAND:
handscore = features['bad-high']
else:
handscore = features['really-bad-high']
return handscore
def simulate(filename = "postriver_values", trials = 0):
#mat = []
#for j in range(16):
# mat.append([0,0,0])
mat = pickle.load(open(filename, "rb"))
for i in range(trials):
theDeck = cards.Deck()
theDeck.shuffle()
herohand = cards.Hand()
adversaryhand = cards.Hand()
table = cards.Hand()
for j in range(2):
herohand.add_card(theDeck.deal_card())
adversaryhand.add_card(theDeck.deal_card())
for j in range(5):
table.add_card(theDeck.deal_card())
handscore = getHandCode(herohand, table)
result = hands.compare_hands(herohand, adversaryhand, table)
if result == 'left':
mat[handscore][0] += 1
elif result == 'none':
mat[handscore][1] += 1
elif result == 'right':
mat[handscore][2] += 1
print mat
pickle.dump(mat, open(filename, "wb"))
def getStrength(hand, table, filename = "postriver_values"):
mat = pickle.load(open(filename, "rb"))
code = getHandCode(hand, table)
chances = mat[code]
s = chances[0] + chances[1] + chances[2]
return [chances[0] / float(s), chances[1] / float(s), chances[2] / float(s)]
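# getStrength returns [P(win), P(tie), P(loss)] estimated from the simulated
# counts, e.g. a stored row of [550, 50, 400] for some hand code normalizes
# to [0.55, 0.05, 0.40].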
#simulate("postriver_values", 900000)
def printMatrix(filename = "postriver_values"):
mat = pickle.load(open(filename, "rb"))
print mat
|
pmaddi/CPSC458_Final-Project
|
afterriver_sim.py
|
Python
|
mit
| 3,164
|
import datetime
from peewee import *
from .base import get_in_memory_db
from .base import ModelTestCase
from .base_models import *
def lange(x, y=None):
if y is None:
value = range(x)
else:
value = range(x, y)
return list(value)
class TestCursorWrapper(ModelTestCase):
database = get_in_memory_db()
requires = [User]
def test_iteration(self):
for i in range(10):
User.create(username=str(i))
query = User.select()
cursor = query.execute()
first_five = []
for i, u in enumerate(cursor):
first_five.append(int(u.username))
if i == 4: break
self.assertEqual(first_five, lange(5))
names = lambda i: [int(obj.username) for obj in i]
self.assertEqual(names(query[5:]), lange(5, 10))
self.assertEqual(names(query[2:5]), lange(2, 5))
for i in range(2):
self.assertEqual(names(cursor), lange(10))
def test_count(self):
for i in range(5): User.create(username=str(i))
with self.assertQueryCount(1):
query = User.select()
self.assertEqual(len(query), 5)
cursor = query.execute()
self.assertEqual(len(cursor), 5)
with self.assertQueryCount(1):
query = query.where(User.username != '0')
cursor = query.execute()
self.assertEqual(len(cursor), 4)
self.assertEqual(len(query), 4)
def test_nested_iteration(self):
for i in range(4): User.create(username=str(i))
with self.assertQueryCount(1):
query = User.select().order_by(User.username)
outer = []
inner = []
for o_user in query:
outer.append(int(o_user.username))
for i_user in query:
inner.append(int(i_user.username))
self.assertEqual(outer, lange(4))
self.assertEqual(inner, lange(4) * 4)
def test_iterator_protocol(self):
for i in range(3): User.create(username=str(i))
with self.assertQueryCount(1):
query = User.select().order_by(User.id)
cursor = query.execute()
for _ in range(2):
for user in cursor: pass
it = iter(cursor)
for obj in it:
pass
self.assertRaises(StopIteration, next, it)
self.assertEqual([int(u.username) for u in cursor], lange(3))
self.assertEqual(query[0].username, '0')
self.assertEqual(query[2].username, '2')
self.assertRaises(StopIteration, next, it)
def test_iterator(self):
for i in range(3): User.create(username=str(i))
with self.assertQueryCount(1):
cursor = User.select().order_by(User.id).execute()
usernames = [int(u.username) for u in cursor.iterator()]
self.assertEqual(usernames, lange(3))
self.assertTrue(cursor.populated)
self.assertEqual(cursor.row_cache, [])
with self.assertQueryCount(0):
self.assertEqual(list(cursor), [])
def test_query_iterator(self):
for i in range(3): User.create(username=str(i))
with self.assertQueryCount(1):
query = User.select().order_by(User.id)
usernames = [int(u.username) for u in query.iterator()]
self.assertEqual(usernames, lange(3))
with self.assertQueryCount(0):
self.assertEqual(list(query), [])
def test_row_cache(self):
def assertCache(cursor, n):
self.assertEqual([int(u.username) for u in cursor.row_cache],
lange(n))
for i in range(10): User.create(username=str(i))
with self.assertQueryCount(1):
cursor = User.select().order_by(User.id).execute()
cursor.fill_cache(5)
self.assertFalse(cursor.populated)
assertCache(cursor, 5)
cursor.fill_cache(5)
assertCache(cursor, 5)
cursor.fill_cache(6)
assertCache(cursor, 6)
self.assertFalse(cursor.populated)
cursor.fill_cache(11)
self.assertTrue(cursor.populated)
assertCache(cursor, 10)
class TestModelObjectCursorWrapper(ModelTestCase):
database = get_in_memory_db()
requires = [User, Tweet]
def test_model_objects(self):
huey = User.create(username='huey')
mickey = User.create(username='mickey')
for user, tweet in ((huey, 'meow'), (huey, 'purr'), (mickey, 'woof')):
Tweet.create(user=user, content=tweet)
query = (Tweet
.select(Tweet, User.username)
.join(User)
.order_by(Tweet.id)
.objects())
with self.assertQueryCount(1):
self.assertEqual([(t.username, t.content) for t in query], [
('huey', 'meow'),
('huey', 'purr'),
('mickey', 'woof')])
def test_dict_flattening(self):
u = User.create(username='u1')
for i in range(3):
Tweet.create(user=u, content='t%d' % (i + 1))
query = (Tweet
.select(Tweet, User)
.join(User)
.order_by(Tweet.id)
.dicts())
with self.assertQueryCount(1):
results = [(r['id'], r['content'], r['username']) for r in query]
self.assertEqual(results, [
(1, 't1', 'u1'),
(2, 't2', 'u1'),
(3, 't3', 'u1')])
class Reg(TestModel):
key = TextField()
ts = DateTimeField()
class TestSpecifyConverter(ModelTestCase):
requires = [Reg]
def test_specify_converter(self):
D = lambda d: datetime.datetime(2020, 1, d)
for i in range(1, 4):
Reg.create(key='k%s' % i, ts=D(i))
RA = Reg.alias()
subq = RA.select(RA.key, RA.ts, RA.ts.alias('aliased'))
ra_a = subq.c.aliased.alias('aliased')
q = (Reg
.select(Reg.key, subq.c.ts.alias('ts'),
ra_a.converter(Reg.ts.python_value))
.join(subq, on=(Reg.key == subq.c.key).alias('rsub'))
.order_by(Reg.key))
results = [(r.key, r.ts, r.aliased) for r in q.objects()]
self.assertEqual(results, [
('k1', D(1), D(1)),
('k2', D(2), D(2)),
('k3', D(3), D(3))])
results2 = [(r.key, r.rsub.ts, r.rsub.aliased)
for r in q]
        self.assertEqual(results2, [
('k1', D(1), D(1)),
('k2', D(2), D(2)),
('k3', D(3), D(3))])
|
coleifer/peewee
|
tests/results.py
|
Python
|
mit
| 6,695
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('challenges', '0005_auto_20150809_1703'),
]
operations = [
migrations.RenameField(
model_name='challenge',
old_name='challenge_data',
new_name='api_data',
),
migrations.RenameField(
model_name='challenge',
old_name='is_challenge_data_json',
new_name='is_api_data_json',
),
]
|
avinassh/learning-scraping
|
challenges/migrations/0006_auto_20150822_1315.py
|
Python
|
mit
| 570
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertEqual(dict(), empty_dict)
self.assertEqual(0, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(2, len(babel_fish))
def test_accessing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual('uno', babel_fish['one'])
self.assertEqual("dos", babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
babel_fish['one'] = 'eins'
expected = {'two': 'dos', 'one': 'eins'}
self.assertEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = {'one': 'uno', 'two': 'dos'}
dict2 = {'two': 'dos', 'one': 'uno'}
self.assertEqual(True, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(2, len(babel_fish.keys()))
self.assertEqual(2, len(babel_fish.values()))
self.assertEqual(True, 'one' in babel_fish.keys())
self.assertEqual(False, 'two' in babel_fish.values())
self.assertEqual(False, 'uno' in babel_fish.keys())
self.assertEqual(True, 'dos' in babel_fish.values())
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(
('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf',
'confused looking zebra'),
42)
self.assertEqual(5, len(cards))
self.assertEqual(42, cards['green elf'])
self.assertEqual(42, cards['yellow dwarf'])
|
Turivniy/Python_koans
|
python2/koans/about_dictionaries.py
|
Python
|
mit
| 1,986
|
# -*- coding: utf-8 -*-
import time
import sys
import socket
import cPickle
import os
from pydbg import *
from pydbg.defines import *
from util import *
PICKLE_NAME = "crash_info.pkl"
exe_path = "D:\\testPoc\\Easy File Sharing Web Server\\fsws.exe"
import threading
host, port = "127.0.0.1", 80
global Running
global lock
global chance
global MAX_OFFSET
global OFFSET
chance = 2
Running = True
lock = threading.Lock()
def check_access_validation(dbg):
global chance
global Running
global lock
with lock:
if dbg.dbg.u.Exception.dwFirstChance:
chance -= 1
            # stop testing the next size.
Running = False
if chance==0:
Running = False
for seh_handler, nseh_handler in dbg.seh_unwind():
seh, nseh = seh_handler, nseh_handler
seh_offset = pattern_find(seh, MAX_OFFSET)
if seh_offset!=-1:
break
print "[+] crash in %d words" % OFFSET
print "[+] seh offset %s." % seh_offset
with open(PICKLE_NAME, "wb") as phase_file:
cPickle.dump(OFFSET, phase_file)
cPickle.dump(seh_offset, phase_file)
cPickle.dump(seh, phase_file)
cPickle.dump(nseh, phase_file)
with open("crash.txt", "w") as f:
f.write("seh: 0x%08x\n" % seh)
f.write("nseh: 0x%08x\n" % nseh)
f.write(dbg.dump_context(stack_depth=1000))
dbg.terminate_process()
return DBG_EXCEPTION_NOT_HANDLED
else:
Running = True
return DBG_EXCEPTION_NOT_HANDLED
return DBG_EXCEPTION_NOT_HANDLED
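# Note on helpers imported from util (assumed to follow the usual Metasploit
# convention): pattern_create(n) builds an n-byte cyclic pattern and
# pattern_find(value, max_len) returns the offset of value inside that pattern,
# or -1. check_access_validation() above uses pattern_find to locate the SEH
# overwrite offset; monitor_debugger() below uses pattern_create to build the
# fuzzing payload.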
class Fuzzer(object):
def __init__(self, exe_path, max_offset = 8000):
self.exe_path = exe_path
self.pid = None
self.dbg = None
global MAX_OFFSET
MAX_OFFSET = max_offset
# self.running = True
self.dbgThread = threading.Thread(target=self.start_debugger)
self.dbgThread.setDaemon(False)
self.dbgThread.start()
# Wait debugger start process
while self.pid is None:
time.sleep(1)
self.monitorThread = threading.Thread(target=self.monitor_debugger)
self.monitorThread.setDaemon(False)
self.monitorThread.start()
def monitor_debugger(self):
global Running
global OFFSET
test_words = 0
raw_input("[+] Please start the debugger...")
while Running and MAX_OFFSET>test_words:
with lock:
if not Running:
break
test_words += 100
OFFSET = test_words
print "[+] test %d words" % test_words
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
buffer = pattern_create(test_words)
httpreq = (
"GET /changeuser.ghp HTTP/1.1\r\n"
"User-Agent: Mozilla/4.0\r\n"
"Host:" + host + ":" + str(port) + "\r\n"
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
"Accept-Language: en-us\r\n"
"Accept-Encoding: gzip, deflate\r\n"
"Referer: http://" + host + "/\r\n"
"Cookie: SESSIONID=6771; UserID=" + buffer + "; PassWD=;\r\n"
"Conection: Keep-Alive\r\n\r\n"
)
s.send(httpreq)
s.close()
            # avoid executing too fast.
time.sleep(1)
if not os.path.isfile(PICKLE_NAME):
print "[+] No found bug."
Running = False
self.dbg.terminate_process()
else:
print "[+] Find bug."
'''
Try to start debugger and run it.
'''
def start_debugger(self):
try:
self.dbg = pydbg()
self.dbg.load(self.exe_path)
self.pid = self.dbg.pid
except pdx:
print "[+] Can't open file, please check file path"
sys.exit(1)
except Exception as e:
print "[+] Unknow error: ", str(e)
sys.exit(1)
self.dbg.set_callback(EXCEPTION_ACCESS_VIOLATION, check_access_validation)
self.dbg.run()
exe_path = "D:\\testPoc\\Easy File Sharing Web Server\\fsws.exe"
Fuzzer(exe_path)
|
b09780978/SEH_Fuzzer
|
SEH_Fuzzer/Seh_bug_fuzzer.py
|
Python
|
mit
| 3,793
|
import numpy as np
import matplotlib.pyplot as plt
import sys
### Plot the analytical solution of the heat equation
# * Steady-state
# * No advection
# * Constant heat conductivity
# * Constant heat production
#
# Choosable Dirichlet + von Neumann boundary conditions
# T=T0 at z=z1
# q=q0 at z=z2
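# Derivation sketch: in steady state with constant k and A, the heat equation
# reduces to k * d2T/dz2 = -A. Integrating once gives k * dT/dz = -A*z + Ca,
# so the von Neumann condition q = q0 at z = z1 (with q = k * dT/dz) yields
# Ca = q0 + A*z1. Integrating again gives k*T = -0.5*A*z**2 + Ca*z + Cb, and
# the Dirichlet condition T = T0 at z = z2 yields
# Cb = k*T0 + 0.5*A*z2**2 - Ca*z2 = k*T0 + 0.5*A*z2**2 - q0*z2 - A*z1*z2.
# These are exactly the integration constants computed in the main program below.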
###### Start configurable parameters ######
# Define heat conductivity, W/mK
k = 2.5
# Define heat production rate, W/m^3
A = 1.11e-6 #1.8e-6
### Define boundary condition value
# location and value of boundary condition one (von neumann)
z1 = 0.0 #40000.0
q0 = 60.0e-3 #15e-3
# location and value of boundary condition two (dirichlet)
z2 = 0.0
T0 = 0 #20.0
# Define height of the model, meters
L = 40000.0
# Define the x-axis limits of the plot
xlimits = (0.0, 1000.0)
###### End of configurable parameters ######
### Main program:
# Calc depth range for the plot
N = 100 # num of points we use for plotting
z = np.linspace(0, L, N)
# Calculate integration constants
Ca = q0 + A*z1
Cb = -q0 * z2 - A*z1*z2 + k*T0 + 0.5*A*z2**2
### Evaluate temperature at chosen range
T = (- 0.5 * A * z**2 + Ca*z + Cb) / k
# Generate line to plot the temperature gradient (dT/dz = q/k) at the bottom boundary
Tbot_grad = [T[N-1], T[N-1] - ( k*(T[N-1]-T[N-2])/(z[N-1]-z[N-2]) )*(L/3.0)/k]
zbot_grad = [-z[N-1], -z[N-1] + (L/3.0)]
# Plot the geotherm
plt.figure()
plt.plot(T, -z, "-b") # T on horizontal axis, z on vertical, pointing down (minus sign), blue solid line
plt.plot(Tbot_grad, zbot_grad, "--r") # Plot the temperature gradient at the bottom boundary, dashed red line
plt.xlabel("Temperature (deg C)")
plt.ylabel("Depth (m)")
plt.title("Geotherm")
plt.grid()
plt.xlim(xlimits)
plt.show()
|
HUGG/NGWM2016-modelling-course
|
Lessons/05-Finite-differences/scripts/plot_steady_state_heat_eq_innerbnd.py
|
Python
|
mit
| 1,717
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "cmd_pkg"
PROJECT_SPACE_DIR = "/home/master/Documents/Guerledan/ROSonPi/glider_dir/install"
PROJECT_VERSION = "0.0.0"
|
Projet-Guerledan/ROSonPi
|
glider_dir/build/cmd_pkg/catkin_generated/pkg.installspace.context.pc.py
|
Python
|
mit
| 400
|
# In order to collect the relevant articles, I used LexisNexis Academic (http://www.lexisnexis.com/hottopics/lnacademic) to search for all instances of Planned Parenthood in The New York Times, from when it first appeared in 1969 to the present.
# I batch downloaded files, 500 at a time, and then culled them into a single .txt file, “PP_NYT.txt”.
# I then used 02_split_ln.py to take the long .txt file of articles and organize it into a .csv file, where each observation (row) was a single article. To do that I ran the following code:
# First cd into the correct working directory (be sure to put in your own working directory),
# then run the split script from a shell:
#   cd /Users/elizabeth/Documents/Berkeley/PS239T/ps239T-final-project
#   python Code/02_split_ln.py Data/PP_NYT.txt
# This should produce a .csv file that will have the data, which can be used for the subsequent analyses.
|
elizabethdherman/ps239T-final-project
|
Code/01_data_setup.py
|
Python
|
mit
| 855
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import poisson
def quantile_normalize(df):
"""
input: dataframe with numerical columns
output: dataframe with quantile normalized values
"""
df_sorted = pd.DataFrame(np.sort(df.values,
axis=0),
index=df.index,
columns=df.columns)
df_mean = df_sorted.mean(axis=1)
df_mean.index = np.arange(1, len(df_mean) + 1)
    df_qn = df.rank(method="min").stack().astype(int).map(df_mean).unstack()
return(df_qn)
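# Quick sanity check (illustrative): columns with different scales but the same
# rank order collapse onto an identical distribution after quantile normalization.
#
#   demo = pd.DataFrame({'a': [1, 2, 3], 'b': [40, 50, 60]})
#   quantile_normalize(demo)
#   # both columns become [20.5, 26.0, 31.5] - the row means of the sorted columns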
df = pd.read_csv("grades.csv")
print(df)
df.plot.density()
df_qn = quantile_normalize(df)
print(df_qn)
df_qn.plot.density()
plt.show()
|
jdurbin/sandbox
|
python/quantile/qn.py
|
Python
|
mit
| 809
|
# -*- encoding: utf8 -*-
import django_filters
from django import forms
from django_filters.widgets import LinkWidget
from .models import Film, SlaveCatalog
from django.shortcuts import render
class FilmFilter(django_filters.FilterSet):
genre = django_filters.AllValuesFilter(widget=LinkWidget, label='')
name = django_filters.CharFilter(lookup_type='icontains', label='',
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Teclea el nombre de la película...'}
))
# director = django_filters.AllValuesFilter(widget=LinkWidget, label='Director')
class Meta:
model = SlaveCatalog
fields = ['genre', 'name']
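# Typical usage in a view (a sketch; the view name, template name and context
# key are assumptions):
#
#   def film_list(request):
#       f = FilmFilter(request.GET, queryset=SlaveCatalog.objects.all())
#       return render(request, 'movies/film_list.html', {'filter': f})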
|
gomezhyuuga/moviewatch
|
movies/filters.py
|
Python
|
mit
| 688
|
"""
tornadochat.py
A TCP chat server in the style of 'Chatroulette' using Tornado.
Part of a study in concurrency and networking in Python:
https://github.com/mjwestcott/chatroulette in which I create versions of this
server using asyncio, gevent, Tornado, and Twisted.
Some 'features':
- on connection, clients are prompted for their name, which will prefix all
of their sent messages;
- the server will notify clients when they are matched with a partner or
their partner disconnects;
- clients whose partner disconnects will be put back in a waiting list to
be matched again;
- clients in the waiting list will periodically be sent a Nietzsche aphorism
to keep them busy;
- clients can issue the following commands:
/help -> describes the service, including the commands below
/quit -> close the connection
/next -> end current chat and wait for a new partner
- the /next command will remember the rejected partners so as not to match
them together again.
Clients are expected to connect via telnet.
"""
from tornado.tcpserver import TCPServer
from tornado.ioloop import IOLoop
from tornado.locks import Lock
import tornado.gen
import random
HOST = 'localhost'
PORT = 12345
clients = {} # A mapping from names (bytes) to Client instances.
waiting = set() # Clients without a partner i.e. waiting to chat.
lock = Lock() # Protects the waiting set.
with open("nietzsche.txt", "r") as f:
# We'll use these to keep waiting clients busy.
aphorisms = list(filter(bool, f.read().split("\n")))
def main():
server = ChatServer()
server.listen(PORT, address=HOST)
try:
IOLoop.current().start()
except KeyboardInterrupt:
IOLoop.current().stop()
class ChatServer(TCPServer):
def handle_stream(self, stream, address):
c = Client(stream)
IOLoop.current().spawn_callback(c.handler)
class Client:
"""A client of the chat server. A new instance is created for every TCP
connection and the handle method is called."""
def __init__(self, stream):
self.stream = stream
self.name = None
self.partner = None
self.rejected = set()
async def handler(self):
"""Handle the lifetime of the connection. Receive all messages from the
stream and send them to the appropriate partner."""
await self.send(
b'Server: Welcome to TCP chat roulette! '
b'You will be matched with a partner.\n'
b'Server: What is your name?\n')
# Main client read loop. Read data until '\n' or disconnection.
while True:
if self.name is not None and self.name not in clients:
# We have been removed from client list by self.disconnected(),
# so shut down this handler.
return
try:
data = await self.stream.read_until(b'\n')
if self.name is None:
# First interaction with client is to set their name.
if await self.set_name(data):
# Successful onboarding; match client.
clients[self.name] = self
IOLoop.current().spawn_callback(self.match)
else:
continue
elif data.startswith(b'/'):
await self.handle_cmd(data)
else:
await self.message_partner(data)
except OSError:
await self.disconnected()
async def set_name(self, data):
"""Return False if name unable to be set because name is already taken.
Otherwise: set the client's name and return True."""
name = data.strip()
if name in clients:
await self.send(b'Server: Sorry, name is taken. Please choose again.\n')
return False
self.name = name
await self.send(b'Server: Hello, %b. Please wait for a partner.\n' % self.name)
return True
async def send(self, msg):
"""Send msg to self."""
assert isinstance(msg, bytes)
try:
await self.stream.write(msg)
except OSError:
await self.disconnected()
async def message_partner(self, msg):
"""Send msg from the self to partner. Prefix the message with the
sender's name."""
assert isinstance(msg, bytes)
partner = self.partner
if partner is None:
await self.send(b'Server: Sorry, no partner. Please wait.\n')
else:
await partner.send(b'%b: %b' % (self.name, msg))
async def handle_cmd(self, data):
if data.startswith(b'/help'):
await self.send(
b'Server: Welcome to TCP chat roulette! You will be matched with a partner.\n'
b'\t/help -> display this help message\n'
b'\t/quit -> close the connection\n'
b'\t/next -> end current chat and wait for a new partner\n')
elif data.startswith(b'/quit'):
await self.disconnected()
elif data.startswith(b'/next'):
other = self.partner
if other is None:
                # Command issued when not engaged in chat with a partner.
await self.send(b'Server: Sorry, no partner. Please wait.\n')
return
await self.send(b'Server: Chat over. Please wait for a new partner.\n')
self.rejected.add(other)
self.partner = None
IOLoop.current().spawn_callback(self.match)
# Let down the partner gently.
await other.partner_disconnected()
else:
await self.send(b'Server: Command not recognised.\n')
async def disconnected(self):
if self.name in clients:
del clients[self.name]
self.stream.close()
# Notify the client's partner, if any.
other = self.partner
if other is not None:
await other.partner_disconnected()
async def partner_disconnected(self):
self.partner = None
await self.send(b'Server: Partner disconnected. Please wait.\n')
IOLoop.current().spawn_callback(self.match)
async def match(self):
tries = 1
while True:
# Each iteration needs to be 'atomic'. Hence the lock. Every waiting
# client will spawn a matcher and attempt to find a partner. We
# don't want any context switches to run another client's matcher
# midway through.
async with lock:
# Find any clients who do not have a partner and add them to the
# waiting set.
waiting.update(c for c in clients.values() if c.partner is None)
# Find any clients in the waiting set who have disconnected and
# remove them from the waiting set. (If they are disconnected
# they will have been removed from the client list.)
waiting.intersection_update(clients.values())
if self not in waiting:
# We've been matched by our partner or we've disconnected.
return
if len(waiting) >= 2:
# Attempt to match clients.
A = self
wanted = waiting - A.rejected
partners = [B for B in wanted if A not in B.rejected and A != B]
if partners:
# Match succeeded.
B = partners.pop()
waiting.remove(A)
waiting.remove(B)
A.partner = B
B.partner = A
await A.send(b'Server: Partner found! Say hello.\n')
await B.send(b'Server: Partner found! Say hello.\n')
return
# Match failed. Periodically send something interesting.
if tries % 5 == 0:
aphorism = random.choice(aphorisms)
await self.send(
b'Server: Thanks for waiting! Here\'s Nietzsche:\n'
b'\n%b\n\n' % aphorism.encode("utf-8"))
# Exponential backoff up to a maximum sleep of 20 secs.
await tornado.gen.sleep(min(20, (tries**2)/4))
tries += 1
if __name__ == '__main__':
main()
|
mjwestcott/chatroulette
|
tornadochat.py
|
Python
|
mit
| 8,485
|
# -*- coding: utf-8 -*-
# (C) 2015 Muthiah Annamalai
from opentamiltests import *
from solthiruthi.morphology import RemoveCaseSuffix #, RemovePlural
import re
import codecs
from tamil import utf8
class RemoveSuffixTest(unittest.TestCase):
def test_basic_suffix_stripper(self):
obj = RemoveCaseSuffix()
actual = []
expected = [u"பதிவிற்",u"கட்டளைக",u"அவர்"]
words_list = [u"பதிவிற்க்கு",u"கட்டளைகளை",u"அவர்கள்"]
for w,x in zip(words_list,expected):
rval = obj.removeSuffix(w)
actual.append(rval[0])
#self.assertTrue(rval[1])
#print(utf8.get_letters(w),'->',rval[1])
self.assertEqual(actual,expected)
if __name__ == "__main__":
unittest.main()
|
atvKumar/open-tamil
|
tests/solthiruthi_suffixremoval.py
|
Python
|
mit
| 840
|
import theano
import os
import numpy as np
from theano import tensor
from blocks.initialization import Constant
from blocks.bricks import Linear, Tanh, NDimensionalSoftmax
from bricks import AssociativeLSTM, LSTM
from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from blocks.model import Model
from blocks.bricks.cost import CategoricalCrossEntropy
from blocks.algorithms import (GradientDescent,
StepClipping, CompositeRule,
Adam)
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.extensions import Printing
from blocks.graph import ComputationGraph
import logging
from utils import SaveLog, Glorot
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
floatX = theano.config.floatX
def get_episodic_copy_data(time_steps, n_data, n_sequence, batch_size):
seq = np.random.randint(1, high=9, size=(n_data, n_sequence))
zeros1 = np.zeros((n_data, time_steps - 1))
zeros2 = np.zeros((n_data, time_steps))
marker = 9 * np.ones((n_data, 1))
zeros3 = np.zeros((n_data, n_sequence))
x = np.concatenate((seq, zeros1, marker, zeros3), axis=1).astype('int32')
y = np.concatenate((zeros3, zeros2, seq), axis=1).astype('int32')
x = x.reshape(n_data / batch_size, batch_size, 1, -1)
x = np.swapaxes(x, 2, 3)
x = np.swapaxes(x, 1, 2)
x = x[..., 0]
z = np.zeros(x.shape)
one_hot_x = np.zeros((x.shape[0], x.shape[1], x.shape[2], 10))
for c in range(10):
z = z * 0
z[np.where(x == c)] = 1
one_hot_x[..., c] += z
y = y.reshape(n_data / batch_size, batch_size, 1, -1)
y = np.swapaxes(y, 2, 3)
y = np.swapaxes(y, 1, 2)
y = y[..., 0]
z = np.zeros(y.shape)
one_hot_y = np.zeros((y.shape[0], y.shape[1], y.shape[2], 9))
for c in range(9):
z = z * 0
z[np.where(y == c)] = 1
one_hot_y[..., c] += z
return one_hot_x, one_hot_y
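# Shape sketch (assuming n_data is divisible by batch_size): each sample is the
# n_sequence digits, blank padding, a marker, and room for the answer, so the
# time dimension is time_steps + 2*n_sequence. The returned arrays are
#   one_hot_x: (n_data/batch_size, time_steps + 2*n_sequence, batch_size, 10)
#   one_hot_y: (n_data/batch_size, time_steps + 2*n_sequence, batch_size, 9)
# e.g. the call below with (100, 1e6, 10, 2) gives x of shape (500000, 120, 2, 10).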
batch_size = 2
num_copies = 1
x_dim = 10
h_dim = 128
o_dim = 9
model = 'alstm'
if model == 'lstm':
coeff = 4
bias = 0
save_path = 'lstm_path'
elif model == 'lstm_f1':
coeff = 4
bias = 1
save_path = 'lstm_f1_path'
elif model == 'alstm':
coeff = 4.5
use_W_xu = False
save_path = 'alstm_path'
print 'Building model ...'
# T x B x F
x = tensor.tensor3('x', dtype=floatX)
# T x B x F'
y = tensor.tensor3('y', dtype=floatX)
x_to_h = Linear(name='x_to_h',
input_dim=x_dim,
output_dim=coeff * h_dim)
x_transform = x_to_h.apply(x)
if model == 'alstm':
lstm = AssociativeLSTM(activation=Tanh(),
dim=h_dim,
num_copies=num_copies,
use_W_xu=use_W_xu,
name="lstm")
else:
lstm = LSTM(activation=Tanh(),
dim=h_dim,
bias=bias,
name="lstm")
h, c = lstm.apply(x_transform)
h_to_o = Linear(name='h_to_o',
input_dim=h_dim,
output_dim=o_dim)
o = h_to_o.apply(h)
o = NDimensionalSoftmax().apply(o, extra_ndim=1)
for brick in (lstm, x_to_h, h_to_o):
brick.weights_init = Glorot()
brick.biases_init = Constant(0)
brick.initialize()
cost = CategoricalCrossEntropy().apply(y, o)
cost.name = 'CE'
print 'Building training process...'
shapes = []
for param in ComputationGraph(cost).parameters:
# shapes.append((param.name, param.eval().shape))
shapes.append(np.prod(list(param.eval().shape)))
print "Total number of parameters: " + str(np.sum(shapes))
if not os.path.exists(save_path):
os.makedirs(save_path)
log_path = save_path + '/log.txt'
fh = logging.FileHandler(filename=log_path)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
algorithm = GradientDescent(cost=cost,
parameters=ComputationGraph(cost).parameters,
step_rule=CompositeRule([StepClipping(10.0),
Adam(1e-3)])) # 3e-4
monitor_cost = TrainingDataMonitoring([cost],
prefix='train',
after_epoch=False,
before_training=True,
every_n_batches=1000)
data = get_episodic_copy_data(100, int(1e6), 10, batch_size)
dataset = IterableDataset({'x': data[0].astype('int8'),
'y': data[1].astype('int8')})
stream = DataStream(dataset)
model = Model(cost)
main_loop = MainLoop(data_stream=stream, algorithm=algorithm,
extensions=[monitor_cost,
Printing(after_epoch=False,
every_n_batches=1000),
SaveLog(every_n_batches=1000)],
model=model)
print 'Starting training ...'
main_loop.run()
|
mohammadpz/Associative_LSTM
|
main.py
|
Python
|
mit
| 4,956
|
# codplayer - base class for the audio devices
#
# Copyright 2013 Peter Liljenberg <peter.liljenberg@gmail.com>
#
# Distributed under an MIT license, please see LICENSE in the top dir.
import array
import time
import sys
import threading
import alsaaudio
from . import sink
from . import model
class PyAlsaSink(sink.Sink):
"""Very simple ALSA sink in only Python. The C version has a
separate thread to ensure that the device play loop runs without
    interference from the Python GC or Global Interpreter Lock, but we
don't gain anything from that here.
There may be some race conditions around pause/resume if things go
haywire in the sink thread at the same time, but this code does
not attempt to fix that.
"""
    # Run at approx 10 Hz. pyalsaaudio will hardcode the hardware buffer to
# four periods.
PERIOD_SIZE = 4096
def __init__(self, player, card, start_without_device, log_performance):
self.log = player.log
self.debug = player.debug
        self.alsa_card = card
        self.start_without_device = start_without_device  # referenced below if the device fails to open
# State attributes protected by lock
self.lock = threading.Lock()
self.alsa_pcm = None
self.paused = False
# End of thread state attributes
# State attributes only used within the sink thread
self.channels = None
self.bytes_per_sample = None
self.rate = None
self.big_endian = None
self.alsa_swap_bytes = False
self.period_bytes = None
self.partial_period = None
self.partial_packet = None
self.device_error = None
# End of sink thread state attributes
self.log("using python implementation of ALSA sink - you might get glitchy sound");
# See if we can open the device, just for logging purposes -
# this will be properly handled in start().
try:
self.debug('alsa: opening device for card: {0}', self.alsa_card)
pcm = alsaaudio.PCM(type = alsaaudio.PCM_PLAYBACK,
mode = alsaaudio.PCM_NORMAL,
card = self.alsa_card)
pcm.close()
except alsaaudio.ALSAAudioError, e:
if self.start_without_device:
self.log('alsa: error opening card {0}: {1}',
self.alsa_card, e)
self.log('alsa: proceeding since start_without_device = True')
self.device_error = str(e)
else:
raise sink.SinkError(e)
def pause(self):
with self.lock:
if self.alsa_pcm:
try:
if not self.paused:
self.alsa_pcm.pause(1)
self.paused = True
return True
except alsaaudio.ALSAAudioError, e:
self.log('error while pausing: {0}', e)
return False
def resume(self):
with self.lock:
if self.paused:
self.paused = False
if self.alsa_pcm:
try:
self.alsa_pcm.pause(0)
except alsaaudio.ALSAAudioError, e:
self.log('error while resuming: {0}', e)
def stop(self):
with self.lock:
pcm = self.alsa_pcm
paused = self.paused
self.alsa_pcm = None
self.device_error = None
self.paused = False
# pyalsaaudio will drain the buffer on close, no way around that
if pcm:
try:
# And since it drains, it can't be paused
if paused:
pcm.pause(0)
pcm.close()
except alsaaudio.ALSAAudioError, e:
self.log('PyAlsaSink.stop: error when closing: {0}'.format(e))
def start(self, channels, bytes_per_sample, rate, big_endian):
self.channels = channels
self.bytes_per_sample = bytes_per_sample
self.rate = rate
self.big_endian = big_endian
self.paused = False
self._try_open_pcm()
def add_packet(self, data, packet):
"""Push data into the device. To quickly(ish) react to
transport state changes we're not looping here, but rather
lets the sink thread do that.
"""
stored = 0
if self.partial_period:
# Append to left-overs from last call
stored = min(self.period_bytes - len(self.partial_period), len(data))
if stored > 0:
self.partial_period += str(buffer(data, 0, stored))
else:
assert stored == 0
packet = self.partial_packet
if len(self.partial_period) == self.period_bytes:
if self._play_period(self.partial_period):
self.partial_period = None
self.partial_packet = None
elif len(data) >= self.period_bytes:
# At least one whole period to push into the device
if self._play_period(buffer(data, 0, self.period_bytes)):
stored = self.period_bytes
else:
# Not enough data for a whole period, save it for the next call
assert len(data) < self.period_bytes
self.partial_period = str(data)
self.partial_packet = packet
stored = len(data)
return stored, packet, self.device_error
def drain(self):
if self.partial_period:
# Pad final packet and push into buffer
n = self.period_bytes - len(self.partial_period)
if n > 0:
self.partial_period = self.partial_period + ('\0' * n)
packet = self.partial_packet
if self._play_period(self.partial_period):
self.partial_period = None
self.partial_packet = None
# Always return here to ensure feedback on last packet.
# We'll get called again to drain properly after this.
return packet, self.device_error
# pyalsaaudio will (here usefully) drain before closing
with self.lock:
pcm = self.alsa_pcm
paused = self.paused
self.alsa_pcm = None
self.device_error = None
self.paused = False
try:
# Ensure we're not paused so this can drain
if paused:
pcm.pause(0)
pcm.close()
except alsaaudio.ALSAAudioError, e:
self.log('PyAlsaSink.drain: error when closing: {0}'.format(e))
return None
def _play_period(self, data):
with self.lock:
pcm = self.alsa_pcm
if pcm is None:
pcm = self._try_open_pcm()
if pcm is None:
# Don't busyloop here
time.sleep(3)
return False
if self.alsa_swap_bytes:
# Heavy-handed assumptions about data formats etc
a = array.array('h', str(data))
assert a.itemsize == 2
a.byteswap()
data = a.tostring()
try:
n = pcm.write(data)
return n > 0
except alsaaudio.ALSAAudioError, e:
self.log('alsa: error writing to device: {0}', e)
self.device_error = str(e)
with self.lock:
self.alsa_pcm = None
try:
pcm.close()
except alsaaudio.ALSAAudioError, e:
self.log('alsa: ignoring error when closing after write failure: {0}', e)
return False
def _try_open_pcm(self):
try:
pcm = alsaaudio.PCM(type = alsaaudio.PCM_PLAYBACK,
mode = alsaaudio.PCM_NORMAL,
card = self.alsa_card)
except alsaaudio.ALSAAudioError, e:
self.log('alsa: error opening card {0}: {1}',
self.alsa_card, e)
return None
if self._set_device_format(pcm):
if self.paused:
# Reopen into the right state
try:
pcm.pause(1)
except alsaaudio.ALSAAudioError, e:
self.log('error while trying to pause newly opened device: {0}', e)
pcm.close()
pcm = None
with self.lock:
self.alsa_pcm = pcm
return pcm
else:
pcm.close()
return None
def _set_device_format(self, pcm):
if self.big_endian:
format = alsaaudio.PCM_FORMAT_S16_BE
else:
format = alsaaudio.PCM_FORMAT_S16_LE
try:
v = pcm.setformat(format)
# Card accepts CD byte order
if v == format:
self.alsa_swap_bytes = False
# Try byte swapped order instead
else:
self.debug('alsa: swapping bytes')
self.alsa_swap_bytes = True
if format == alsaaudio.PCM_FORMAT_S16_BE:
format = alsaaudio.PCM_FORMAT_S16_LE
else:
format = alsaaudio.PCM_FORMAT_S16_BE
v = pcm.setformat(format)
if v != format:
self.log("alsa: can't set S16_BE/S16_LE format, card stuck on {0}", v)
self.device_error = "sample format not accepted"
return False
v = pcm.setrate(self.rate)
if v != self.rate:
self.log("alsa: can't set rate to {0}, card stuck on {1}", self.rate, v)
self.device_error = "sample format not accepted"
return False
v = pcm.setchannels(self.channels)
if v != self.channels:
self.log("alsa: can't set channels to {0}, card stuck on {1}", self.channels, v)
self.device_error = "sample format not accepted"
return False
v = pcm.setperiodsize(self.PERIOD_SIZE)
if v != self.PERIOD_SIZE:
self.log('alsa: card refused our period size of {0}, using {1} instead',
self.PERIOD_SIZE, v)
self.period_bytes = v * self.channels * self.bytes_per_sample
return True
except alsaaudio.ALSAAudioError, e:
self.log('alsa: error setting format: {0}', e)
self.device_error = str(e)
return False
|
petli/codplayer
|
src/codplayer/py_alsa_sink.py
|
Python
|
mit
| 10,530
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['djangodash']
|
noamsu/djangodash2012
|
djangodash/migrations/0001_initial.py
|
Python
|
mit
| 319
|
import copy
import pyspawn
import matplotlib.pyplot as plt
import numpy as np
import h5py
import glob
au_to_fs = 0.02418884254
au_to_ev = 13.6
au_to_ang = 0.529177
def plot_total_energies(time, toten, keys, istates_dict, colors, markers, linestyles):
"""Plots total classical energies for each trajectory and saves it to png file.
This plot is very useful to check if energy is conserved.
Color is represented by the istate of a trajectory
Linestyle and Markers represent different trajectories
"""
ax = plt.figure("Total Energies", figsize=(4.8, 3.6))
min_energy = min(toten["00"]) * au_to_ev
max_energy = max(toten["00"]) * au_to_ev
for index, key in enumerate(keys):
if min(toten[key]*au_to_ev) < min_energy:
min_energy = min(toten[key]*au_to_ev)
if max(toten[key]*au_to_ev) > max_energy:
max_energy = max(toten[key]*au_to_ev)
plt.plot(time[key]*au_to_fs, toten[key]*au_to_ev-min_energy, label=key, color=colors[int(istates_dict[key])],
linestyle=linestyles[index], marker=markers[index])
plt.xlabel('Time, fs')
plt.ylabel('Total Energy, eV')
# plt.ylim([min_energy - 0.05 * (max_energy - min_energy), max_energy + 0.05 * (max_energy - min_energy)])
plt.legend()
plt.tick_params(axis='both', which='major')
plt.title('Total Energies')
plt.tight_layout()
ax.savefig("Total_Energies.png", dpi=300)
def plot_total_pop(time, el_pop, nstates, colors):
""" This plots the total electronic population on each
electronic state (over all basis functions)"""
g5 = plt.figure("Total Electronic Populations", figsize=(4.8, 3.6))
for n_state in range(nstates):
plt.plot(time*au_to_fs, el_pop[:, n_state], color=colors[n_state],
label='S' + str(n_state))
plt.xlabel('Time, fs')
plt.ylabel('Population')
plt.title('Total Electronic Population')
plt.legend()
plt.tight_layout()
g5.savefig("Total_El_pop.png", dpi=300)
def plot_e_gap(time, poten, keys, state1, state2, istates_dict, colors, linestyles, markers):
"""Plots gaps between the specified states for all trajectories
istates order needs to be fixed!
"""
g2 = plt.figure("Energy gap", figsize=(4.8, 3.6))
for index, key in enumerate(keys):
plt.plot(time[key]*au_to_fs, poten[key][:, state2]*au_to_ev - poten[key][:, state1]*au_to_ev,
linestyle=linestyles[index], marker=markers[index], color=colors[int(istates_dict[key])],
label=key + ": " + r'$S_{}$'.format(state2) + "-"
+ r'$S_{}$'.format(state1))
plt.xlabel('Time, fs')
# plt.title('Energy gaps, au')
plt.ylabel('Energy gap, eV')
plt.legend()
plt.tight_layout()
g2.savefig("E_gap.png", dpi=300)
def plot_energies(keys, time, poten, numstates, colors, linestyles):
g3 = plt.figure("Energies", figsize=(4.8, 3.6))
for index_key, key in enumerate(keys):
for index_state, n in enumerate(range(numstates)):
plt.plot(time[key]*au_to_fs, poten[key][:, n]*au_to_ev,
label=key + ": " + 'S' + str((n + 1)), linestyle=linestyles[index_key],
color=colors[index_state])
plt.xlabel('Time, fs')
plt.ylabel('Energy, eV')
plt.legend()
plt.tight_layout()
g3.savefig("Energies.png", dpi=300)
def plot_tdc(time, tdc, keys, numstates, istates_dict, spawnthresh, colors, linestyles, markers):
plt.figure("Time-derivative couplings", figsize=(4.8, 3.6))
for index_key, key in enumerate(keys):
        # we plot only a subset of trajectories, but need all of them to match istates with labels
for n in range(numstates):
if n != int(istates_dict[key]):
# we don't plot coupling with itself which is zero
plt.plot(time[key][:len(tdc[key])]*au_to_fs, np.abs(tdc[key][:, n]),
color=colors[istates_dict[key]], linestyle=linestyles[index_key],
marker=markers[index_key])
plt.axhline(y=spawnthresh, alpha=0.5, color='r', linewidth=1.0,
linestyle='--')
plt.xlabel('Time, fs')
plt.ylabel('Coupling, au')
# for m in range(nstates):
# plt.text(spawn_times[m], plt.ylim()[1]-0.05, all_keys[m], fontsize=10)
plt.title('Time-derivative couplings, thresh='+str(spawnthresh))
# plt.tight_layout
plt.subplots_adjust(right=0.8)
plt.savefig("all_tdc.png", dpi=300)
def plot_bonds(time, keys, bonds_list, bonds_array, colors, linestyles):
bond_labels = []
for n in range(np.shape(bonds_list)[0]):
bond_labels.append(str(bonds_list[n, 0]) + "-" + str(bonds_list[n, 1]))
plt.figure("Bonds", figsize=(4.8, 3.6))
for index_key, key in enumerate(keys):
for n in range(np.shape(bonds_list)[0]):
plt.plot(time[key]*au_to_fs, bonds_array[key][:, n]*au_to_ang, color=colors[index_key],
linestyle=linestyles[n], label=key) #+ ":" + bond_labels[n])
plt.xlabel('Time, fs')
plt.gca().set_ylabel('Distance, ' + r'$\AA$')
#plt.ylabel('Bond length, ang')
plt.legend()
# plt.title('Bond lengths')
plt.tight_layout()
plt.savefig("bonds.png", dpi=300)
def plot_diheds(time, keys, diheds_list, diheds_array, colors, linestyles):
diheds_labels = []
for n in range(np.shape(diheds_list)[0]):
diheds_labels.append(str(diheds_list[n, 0]) + "-" + str(diheds_list[n, 1]) + "-"
+ str(diheds_list[n, 2]) + "-" + str(diheds_list[n, 3]))
plt.figure("Dihedral angles", figsize=(4.8, 3.6))
for index_key, key in enumerate(keys):
for n in range(np.shape(diheds_list)[0]):
plt.plot(time[key]*au_to_fs, diheds_array[key][:, n], color=colors[index_key],
linestyle=linestyles[n], label=key) #+ ":" + diheds_labels[n])
plt.xlabel('Time, fs')
plt.ylabel('Angle, ' + u'\N{DEGREE SIGN}')
plt.legend()
# plt.title('Dihedral Angles')
plt.tight_layout()
plt.savefig("dihedral_angs.png", dpi=300)
|
blevine37/pySpawn17
|
pyspawn/plotting/traj_plot.py
|
Python
|
mit
| 6,119
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="parcoords.labelfont", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/parcoords/labelfont/_family.py
|
Python
|
mit
| 517
|
# -*- coding: utf-8 -*-
"""
auto rule template
~~~~
:author: LoRexxar <LoRexxar@gmail.com>
:homepage: https://github.com/LoRexxar/Kunlun-M
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
from utils.api import *
class CVI_1002():
"""
rule class
"""
def __init__(self):
self.svid = 1002
self.language = "php"
self.author = "LoRexxar/wufeifei"
self.vulnerability = "SSRF"
self.description = "file_get_contents函数的参数可控,可能会导致SSRF漏洞"
self.level = 7
# status
self.status = True
        # partial configuration
self.match_mode = "function-param-regex"
self.match = r"file_get_contents"
# for solidity
self.match_name = None
self.black_list = None
# for chrome ext
self.keyword = None
# for regex
self.unmatch = None
self.vul_function = None
def main(self, regex_string):
"""
regex string input
:regex_string: regex match string
:return:
"""
pass
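# Illustrative PHP code this rule is meant to flag (hypothetical example): an
# attacker-controlled parameter reaching file_get_contents enables SSRF, e.g.
#
#   <?php echo file_get_contents($_GET['url']); ?>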
|
LoRexxar/Cobra-W
|
rules/php/CVI_1002.py
|
Python
|
mit
| 1,175
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
p = None
        for i in xrange(len(nums) - 1):
            if nums[i] > nums[i+1]:
                if p is not None:
                    return False
                p = i
        return (p is None or p == 0 or p == len(nums)-2 or
                nums[p-1] <= nums[p+1] or nums[p] <= nums[p+2])
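# Examples: [4, 2, 3] -> True (lowering the 4 fixes the single descent);
# [4, 2, 1] -> False (two descents cannot be repaired with one change).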
|
Lanceolata/code-problems
|
python/leetcode_easy/Question_184_Non_decreasing_Array.py
|
Python
|
mit
| 453
|
'''
Handles files, folders and paths.
'''
import os
import shutil
from datetime import datetime
from datetime import date
from GeoConverter import settings
from OGRgeoConverter.jobs import jobidentification
def store_uploaded_file(job_id, file_data, file_name):
'''
Takes file data as argument and stores it in the path specified by job_id and file_name
'''
upload_folder_path = get_upload_folder_path(job_id)
upload_file_path = os.path.join(upload_folder_path, file_name)
with open(upload_file_path, 'wb+') as destination:
for chunk in file_data.chunks():
destination.write(chunk)
def rename_file(folder_path, old_file_name, new_file_name):
old_file_path = os.path.join(folder_path, old_file_name)
new_file_path = os.path.join(folder_path, new_file_name)
os.rename(old_file_path, new_file_path)
def create_job_folders(job_id):
os.makedirs(get_upload_folder_path(job_id))
os.makedirs(get_extract_folder_path(job_id))
os.makedirs(get_output_folder_path(job_id))
os.makedirs(get_download_folder_path(job_id))
def remove_old_folders():
'''
Cleans up conversion folders older than 2 days
'''
folder_path = get_conversion_job_folder()
today = datetime.now().date()
for folder in os.listdir(folder_path):
parts = folder.split('_')
if len(parts) == 3:
folder_date = date(int(parts[0]), int(parts[1]), int(parts[2]))
difference = today - folder_date
if difference.days >= 2:
full_path = os.path.join(folder_path, folder)
shutil.rmtree(full_path)
def get_file_count(folder_path):
file_count = 0
for _, _, files in os.walk(folder_path):
file_count += len(files)
return file_count
def get_conversion_job_folder():
'''
Returns the folder containing the job folders
'''
return settings.OUTPUT_DIR('ConversionJobs')
def get_folder_path(job_id):
'''
Returns the root job folder belonging to a job id
e.g. /OGRgeoConverter/ConversionJobs/2012_12_12/
'''
year, month, day, hour, minute, _, code = jobidentification.split_job_id(
job_id)
folder_path = get_conversion_job_folder()
folder_path = os.path.join(folder_path, year + '_' + month + '_' + day)
folder_path = os.path.join(folder_path, hour + '_' + minute + '_' + code)
return folder_path
def get_upload_folder_path(job_id):
'''
Returns the folder where uploads are stored (depending on job id)
e.g. /OGRgeoConverter/ConversionJobs/2012_12_12/1_upload/
'''
folder_path = get_folder_path(job_id)
folder_path = os.path.join(folder_path, '1_upload')
return folder_path
def get_extract_folder_path(job_id):
'''
Returns the folder where files from archives are extracted to (depending on job id)
e.g. /OGRgeoConverter/ConversionJobs/2012_12_12/2_extract/
'''
folder_path = get_folder_path(job_id)
folder_path = os.path.join(folder_path, '2_extract')
return folder_path
def get_output_folder_path(job_id):
'''
Returns the folder where converted files are stored (depending on job id)
e.g. /OGRgeoConverter/ConversionJobs/2012_12_12/3_output/
'''
folder_path = get_folder_path(job_id)
folder_path = os.path.join(folder_path, '3_output')
return folder_path
def get_download_folder_path(job_id):
'''
Returns the folder where the download file is stored (depending on job id)
e.g. /OGRgeoConverter/ConversionJobs/2012_12_12/4_download/
'''
folder_path = get_folder_path(job_id)
folder_path = os.path.join(folder_path, '4_download')
return folder_path
def get_download_file_path(job_id):
'''
Returns the file path of the final archive with the converted files (depending on job id)
e.g. /OGRgeoConverter/ConversionJobs/2012_12_12/4_download/geoconverter_20121212_151500.zip
'''
year, month, day, hour, minute, second, _ = jobidentification.split_job_id(
job_id)
file_path = get_download_folder_path(job_id)
file_path = os.path.join(
file_path,
'geoconverter' +
'_' +
year +
month +
day +
'_' +
hour +
minute +
second +
'.zip')
return file_path
def download_file_exists(job_id):
download_file_path = get_download_file_path(job_id)
return os.path.exists(download_file_path)
|
geometalab/geoconverter
|
OGRgeoConverter/filesystem/filemanager.py
|
Python
|
mit
| 4,442
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class livecoin (Exchange):
def describe(self):
return self.deep_extend(super(livecoin, self).describe(), {
'id': 'livecoin',
'name': 'LiveCoin',
'countries': ['US', 'UK', 'RU'],
'rateLimit': 1000,
'hasCORS': False,
# obsolete metainfo interface
'hasFetchTickers': True,
'hasFetchCurrencies': True,
# new metainfo interface
'has': {
'fetchTickers': True,
'fetchCurrencies': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27980768-f22fc424-638a-11e7-89c9-6010a54ff9be.jpg',
'api': 'https://api.livecoin.net',
'www': 'https://www.livecoin.net',
'doc': 'https://www.livecoin.net/api?lang=en',
},
'api': {
'public': {
'get': [
'exchange/all/order_book',
'exchange/last_trades',
'exchange/maxbid_minask',
'exchange/order_book',
'exchange/restrictions',
'exchange/ticker', # omit params to get all tickers at once
'info/coinInfo',
],
},
'private': {
'get': [
'exchange/client_orders',
'exchange/order',
'exchange/trades',
'exchange/commission',
'exchange/commissionCommonInfo',
'payment/balances',
'payment/balance',
'payment/get/address',
'payment/history/size',
'payment/history/transactions',
],
'post': [
'exchange/buylimit',
'exchange/buymarket',
'exchange/cancellimit',
'exchange/selllimit',
'exchange/sellmarket',
'payment/out/capitalist',
'payment/out/card',
'payment/out/coin',
'payment/out/okpay',
'payment/out/payeer',
'payment/out/perfectmoney',
'payment/voucher/amount',
'payment/voucher/make',
'payment/voucher/redeem',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.18 / 100,
'taker': 0.18 / 100,
},
},
})
def common_currency_code(self, currency):
return currency
async def fetch_markets(self):
markets = await self.publicGetExchangeTicker()
restrictions = await self.publicGetExchangeRestrictions()
restrictionsById = self.index_by(restrictions['restrictions'], 'currencyPair')
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['symbol']
symbol = id
base, quote = symbol.split('/')
coinRestrictions = self.safe_value(restrictionsById, symbol)
precision = {
'price': 5,
'amount': 8,
'cost': 8,
}
limits = {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
}
if coinRestrictions:
precision['price'] = self.safe_integer(coinRestrictions, 'priceScale', 5)
limits['amount']['min'] = self.safe_float(coinRestrictions, 'minLimitQuantity', limits['amount']['min'])
limits['price'] = {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
}
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'precision': precision,
'limits': limits,
'info': market,
}))
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetInfoCoinInfo(params)
currencies = response['info']
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = currency['symbol']
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.common_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
active = (currency['walletStatus'] == 'normal')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': currency['name'],
'active': active,
'status': 'ok',
'fee': currency['withdrawFee'], # todo: redesign
'precision': precision,
'limits': {
'amount': {
'min': currency['minOrderAmount'],
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': currency['minOrderAmount'],
'max': None,
},
'withdraw': {
'min': currency['minWithdrawAmount'],
'max': math.pow(10, precision),
},
'deposit': {
'min': currency['minDepositAmount'],
'max': None,
},
},
}
result = self.append_fiat_currencies(result)
return result
    def append_fiat_currencies(self, result=None):
        # `result` is a dict keyed by currency code; avoid a shared mutable default
        if result is None:
            result = {}
        precision = 8
defaults = {
'info': None,
'active': True,
'status': 'ok',
'fee': None,
'precision': precision,
'limits': {
'withdraw': {'min': None, 'max': None},
'deposit': {'min': None, 'max': None},
'amount': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
},
}
currencies = [
{'id': 'USD', 'code': 'USD', 'name': 'US Dollar'},
{'id': 'EUR', 'code': 'EUR', 'name': 'Euro'},
{'id': 'RUR', 'code': 'RUR', 'name': 'Russian ruble'},
]
for i in range(0, len(currencies)):
currency = currencies[i]
code = currency['code']
result[code] = self.extend(defaults, currency)
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balances = await self.privateGetPaymentBalances()
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['currency']
account = None
if currency in result:
account = result[currency]
else:
account = self.account()
if balance['type'] == 'total':
account['total'] = float(balance['value'])
if balance['type'] == 'available':
account['free'] = float(balance['value'])
if balance['type'] == 'trade':
account['used'] = float(balance['value'])
result[currency] = account
return self.parse_balance(result)
async def fetch_fees(self, params={}):
await self.load_markets()
commissionInfo = await self.privateGetExchangeCommissionCommonInfo()
commission = self.safe_float(commissionInfo, 'commission')
return {
'info': commissionInfo,
'maker': commission,
'taker': commission,
'withdraw': 0.0,
}
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
orderbook = await self.publicGetExchangeOrderBook(self.extend({
'currencyPair': self.market_id(symbol),
'groupByPrice': 'false',
'depth': 100,
}, params))
timestamp = orderbook['timestamp']
return self.parse_order_book(orderbook, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
vwap = float(ticker['vwap'])
baseVolume = float(ticker['volume'])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['best_bid']),
'ask': float(ticker['best_ask']),
'vwap': float(ticker['vwap']),
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetExchangeTicker(params)
tickers = self.index_by(response, 'symbol')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetExchangeTicker(self.extend({
'currencyPair': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = trade['time'] * 1000
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': str(trade['id']),
'order': None,
'type': None,
'side': trade['type'].lower(),
'price': trade['price'],
'amount': trade['quantity'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetExchangeLastTrades(self.extend({
'currencyPair': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'lastModificationTime')
if not timestamp:
timestamp = self.parse8601(order['lastModificationTime'])
trades = None
if 'trades' in order:
# TODO currently not supported by livecoin
# trades = self.parse_trades(order['trades'], market, since, limit)
trades = None
status = None
if order['orderStatus'] == 'OPEN' or order['orderStatus'] == 'PARTIALLY_FILLED':
status = 'open'
elif order['orderStatus'] == 'EXECUTED' or order['orderStatus'] == 'PARTIALLY_FILLED_AND_CANCELLED':
status = 'closed'
else:
status = 'canceled'
symbol = order['currencyPair']
base, quote = symbol.split('/')
type = None
side = None
if order['type'].find('MARKET') >= 0:
type = 'market'
else:
type = 'limit'
if order['type'].find('SELL') >= 0:
side = 'sell'
else:
side = 'buy'
price = self.safe_float(order, 'price', 0.0)
cost = self.safe_float(order, 'commissionByTrade', 0.0)
remaining = self.safe_float(order, 'remainingQuantity', 0.0)
amount = self.safe_float(order, 'quantity', remaining)
filled = amount - remaining
return {
'info': order,
'id': order['id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': trades,
'fee': {
'cost': cost,
'currency': quote,
},
}
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol:
market = self.market(symbol)
pair = market['id'] if market else None
request = {}
if pair:
request['currencyPair'] = pair
if since:
request['issuedFrom'] = int(since)
if limit:
request['endRow'] = limit - 1
response = await self.privateGetExchangeClientOrders(self.extend(request, params))
result = []
rawOrders = []
if response['data']:
rawOrders = response['data']
for i in range(0, len(rawOrders)):
order = rawOrders[i]
result.append(self.parse_order(order, market))
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
result = await self.fetch_orders(symbol, since, limit, self.extend({
'openClosed': 'OPEN',
}, params))
return result
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
result = await self.fetch_orders(symbol, since, limit, self.extend({
'openClosed': 'CLOSED',
}, params))
return result
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePostExchange' + self.capitalize(side) + type
market = self.market(symbol)
order = {
'quantity': self.amount_to_precision(symbol, amount),
'currencyPair': market['id'],
}
if type == 'limit':
order['price'] = self.price_to_precision(symbol, price)
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': str(response['orderId']),
}
async def cancel_order(self, id, symbol=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
currencyPair = market['id']
response = await self.privatePostExchangeCancellimit(self.extend({
'orderId': id,
'currencyPair': currencyPair,
}, params))
message = self.safe_string(response, 'message', self.json(response))
if 'success' in response:
if not response['success']:
raise InvalidOrder(message)
elif 'cancelled' in response:
if response['cancelled']:
return response
else:
raise OrderNotFound(message)
raise ExchangeError(self.id + ' cancelOrder() failed: ' + self.json(response))
async def fetch_deposit_address(self, currency, params={}):
request = {
'currency': currency,
}
response = await self.privateGetPaymentGetAddress(self.extend(request, params))
address = self.safe_string(response, 'wallet')
return {
'currency': currency,
'address': address,
'status': 'ok',
'info': response,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
query = self.urlencode(self.keysort(params))
if method == 'GET':
if params:
url += '?' + query
if api == 'private':
self.check_required_credentials()
if method == 'POST':
body = query
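            # Private-API auth: HMAC-SHA256 of the key-sorted, url-encoded
            # params, hex-digested and upper-cased into the 'Sign' header.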
signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha256)
headers = {
'Api-Key': self.apiKey,
'Sign': signature.upper(),
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body):
if code >= 300:
if body[0] == "{":
response = json.loads(body)
if 'errorCode' in response:
error = response['errorCode']
if error == 1:
raise ExchangeError(self.id + ' ' + self.json(response))
elif error == 2:
if 'errorMessage' in response:
if response['errorMessage'] == 'User not found':
raise AuthenticationError(self.id + ' ' + response['errorMessage'])
else:
raise ExchangeError(self.id + ' ' + self.json(response))
elif (error == 10) or (error == 11) or (error == 12) or (error == 20) or (error == 30) or (error == 101) or (error == 102):
raise AuthenticationError(self.id + ' ' + self.json(response))
elif error == 31:
raise NotSupported(self.id + ' ' + self.json(response))
elif error == 32:
raise ExchangeError(self.id + ' ' + self.json(response))
elif error == 100:
raise ExchangeError(self.id + ': Invalid parameters ' + self.json(response))
elif error == 103:
raise InvalidOrder(self.id + ': Invalid currency ' + self.json(response))
elif error == 104:
raise InvalidOrder(self.id + ': Invalid amount ' + self.json(response))
elif error == 105:
raise InvalidOrder(self.id + ': Unable to block funds ' + self.json(response))
else:
raise ExchangeError(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + body)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'success' in response:
if not response['success']:
raise ExchangeError(self.id + ' error: ' + self.json(response))
return response
|
tritoanst/ccxt
|
python/ccxt/async/livecoin.py
|
Python
|
mit
| 20,475
|
"""
[2015-05-11] Challenge #214 [Easy] Calculating the standard deviation
http://www.reddit.com/r/dailyprogrammer/comments/35l5eo/20150511_challenge_214_easy_calculating_the/
Description
Standard deviation is one of the most basic measurements in statistics. For some collection of values (known as a "population" in statistics), it measures how dispersed those values are. If the standard deviation is high, it means that the values in the population are very spread out; if it's low, it means that the values are tightly clustered around the mean value.
For today's challenge, you will get a list of numbers as input which will serve as your statistical population, and you are then going to calculate the standard deviation of that population. There are statistical packages for many programming languages that can do this for you, but you are highly encouraged not to use them: the spirit of today's challenge is to implement the standard deviation function yourself.
The following steps describe how to calculate standard deviation for a collection of numbers. For this example, we will use the following values:
5 6 11 13 19 20 25 26 28 37
First, calculate the average (or mean) of all your values, which is defined as the sum of all the values divided by the total number of values in the population. For our example, the sum of the values is 190 and since there are 10 different values, the mean value is 190/10 = 19
Next, for each value in the population, calculate the difference between it and the mean value, and square that difference. So, in our example, the first value is 5 and the mean 19, so you calculate (5 - 19)^2 which is equal to 196. For the second value (which is 6), you calculate (6 - 19)^2 which is equal to 169, and so on.
Calculate the sum of all the values from the previous step. For our example, it will be equal to 196 + 169 + 64 + ... = 956.
Divide that sum by the number of values in your population. The result is known as the variance of the population, and is equal to the square of the standard deviation. For our example, the number of values in the population is 10, so the variance is equal to 956/10 = 95.6.
Finally, to get standard deviation, take the square root of the variance. For our example, sqrt(95.6) ≈ 9.7775.
Formal inputs & outputs
Input
The input will consist of a single line of numbers separated by spaces. The numbers will all be positive integers.
Output
Your output should consist of a single line with the standard deviation rounded off to at most 4 digits after the decimal point.
Sample inputs & outputs
Input 1
5 6 11 13 19 20 25 26 28 37
Output 1
9.7775
Input 2
37 81 86 91 97 108 109 112 112 114 115 117 121 123 141
Output 2
23.2908
Challenge inputs
Challenge input 1
266 344 375 399 409 433 436 440 449 476 502 504 530 584 587
Challenge input 2
809 816 833 849 851 961 976 1009 1069 1125 1161 1172 1178 1187 1208 1215 1229 1241 1260 1373
Notes
For you statistics nerds out there, note that this is the population standard deviation, not the sample standard deviation. We are, after all, given the entire population and not just a sample.
If you have a suggestion for a future problem, head on over to /r/dailyprogrammer_ideas and let us know about it!
"""
import math  # used by tests() below
# 1st implementation: clear, short, matches the problem definition
def standard_deviation_1(input_str):
    values = [int(s) for s in input_str.split()]
    count = len(values)
    avg = sum(values) / count
    variance = sum((n - avg)**2 for n in values) / count
return variance**0.5
# 2nd implementation: 1 pass, O(1) memory usage
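# Uses the identity Var(X) = E[X^2] - (E[X])^2, so only the running sums of n
# and n*n are needed instead of keeping the whole population in memory.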
def standard_deviation(input_str):
n_sum, n2_sum, count = 0, 0, 0
for s in input_str.split():
n = int(s)
n_sum += n
n2_sum += n*n
count += 1
if count == 0:
return None
avg = n_sum / count
return (n2_sum/count - n_sum*n_sum/(count*count))**0.5
def tests():
assert math.trunc(standard_deviation('5 6 11 13 19 20 25 26 28 37') * 10000.0) == 97775, 'Test 1'
assert math.trunc(standard_deviation('37 81 86 91 97 108 109 112 112 114 115 117 121 123 141') * 10000.0) == 232908, 'Test 2'
assert math.trunc(standard_deviation('266 344 375 399 409 433 436 440 449 476 502 504 530 584 587') * 10000.0) == 836615, 'Test 3'
assert math.trunc(standard_deviation('809 816 833 849 851 961 976 1009 1069 1125 1161 1172 1178 1187 1208 1215 1229 1241 1260 1373') * 10000.0) == 1701272, 'Test 4'
print('All tests passed')
if __name__ == '__main__':
tests()
# Python 3.4+ => import statistics; statistics.pstdev(values)
# Others => import numpy; numpy.std(values)  # population std by default (ddof=0)
|
feliposz/daily-programmer-solutions
|
E214_StandardDeviation.py
|
Python
|
mit
| 4,596
|
'''
DataTable: a Flask/SQLAlchemy module to generate HTML tables with server-side data.
The project uses DataTables from http://www.datatables.net/
'''
__all__ = ('Table', 'Column')
import simplejson
from flask import url_for, request
from sqlalchemy import asc, desc
class Column(object):
def __init__(self, name, field, display=None, formatter=None, width=None):
super(Column, self).__init__()
self.name = name
self.field = field
self.display = display
self.formatter = formatter
self.width = width
def __html__(self):
return '<th>%s</th>' % self.name
def __js_def__(self, index, out):
if self.width:
out.append({'sWidth': self.width, 'aTargets': [index]})
def get_field(self, entry):
if self.display:
value = self.display(entry)
else:
value = getattr(entry, self.field)
if self.formatter:
return self.formatter(value)
return value
class Table(object):
__uniqid = 0
db_table = None
db_session = None
display_length = 20
activate_sort = True
activate_info = True
activate_paginate = True
activate_scroll_infinite = False
activate_filter = True
activate_length_change = True
activate_scroll_collapse = True
pagination_type = 'full_numbers'
scroll_x = ''
scroll_y = ''
href_link = None
def __init__(self, **kwargs):
super(Table, self).__init__()
self.html_id = kwargs.get('html_id', None)
if self.html_id is None:
Table.__uniqid += 1
self.html_id = 'datatable%d' % Table.__uniqid
def query(self):
return self.db_session.query(self.db_table)
def ajax(self):
q = self.query()
# total number of entries
count = q.count()
# search
if 'sSearch' in request.args:
search = None
for col in self.columns:
field = getattr(self.db_table, col.field)
field = field.like('%%%s%%' % request.args['sSearch'])
if search is None:
search = field
else:
search = search | field
q = q.filter(search)
# sorting
if 'iSortingCols' in request.args:
field = self.columns[int(request.args['iSortCol_0'])].field
db_field = getattr(self.db_table, field)
if request.args['sSortDir_0'] == 'asc':
db_field = asc(db_field)
else:
db_field = desc(db_field)
q = q.order_by(db_field)
# get the number after filter
count_filtered = q.count()
# pagination
if self.activate_scroll_infinite:
limit = self.display_length
else:
limit = request.args['iDisplayLength']
offset = request.args['iDisplayStart']
entries = q.offset(offset).limit(limit)
# construct the output
data = []
columns = self.columns
for entry in entries:
data.append([col.get_field(entry) for col in columns])
return simplejson.dumps({
'sEcho': request.args['sEcho'],
'iTotalRecords': count,
'iTotalDisplayRecords': count_filtered,
'aaData': data
})
def __json_columns_defs__(self):
out = []
for index, col in enumerate(self.columns):
col.__js_def__(index, out)
return simplejson.dumps(out)
def __js_rowclick__(self):
return ''
def __html_columns__(self):
out = ['<tr>']
for col in self.columns:
out.append(col.__html__())
out.append('</tr>')
return ''.join(out)
def __html__(self):
data = {
'html_id': self.html_id,
'columns': self.__html_columns__(),
'click_callback': self.__js_rowclick__(),
# datatable
'iDisplayLength': str(int(self.display_length)),
'bSort': str(bool(self.activate_sort)).lower(),
'bInfo': str(bool(self.activate_info)).lower(),
'bPaginate': str(bool(self.activate_paginate)).lower(),
'bScrollInfinite': str(bool(self.activate_scroll_infinite)).lower(),
'bScrollCollapse': str(bool(self.activate_scroll_collapse)).lower(),
'bFilter': str(bool(self.activate_filter)).lower(),
'bLengthChange': str(bool(self.activate_length_change)).lower(),
'sScrollX': str(self.scroll_x),
'sScrollY': str(self.scroll_y),
'sPaginationType': str(self.pagination_type),
'sAjaxSource': url_for(self.source),
'aoColumnDefs': self.__json_columns_defs__()
}
html = '''
<script type="text/javascript">
$(document).ready(function() {
$("#%(html_id)s").dataTable({
'bJQueryUI': true,
'bProcessing': true,
'bServerSide': true,
'bScrollInfinite': %(bScrollInfinite)s,
'bScrollCollapse': %(bScrollCollapse)s,
'bSort': %(bSort)s,
'bInfo': %(bInfo)s,
'bFilter': %(bFilter)s,
'bLengthChange': %(bLengthChange)s,
'bPaginate': %(bPaginate)s,
'iDisplayLength': %(iDisplayLength)s,
'sAjaxSource': '%(sAjaxSource)s',
'sPaginationType': '%(sPaginationType)s',
'sScrollY': '%(sScrollY)s',
'sScrollX': '%(sScrollX)s',
'aoColumnDefs': %(aoColumnDefs)s
});
});
$("#%(html_id)s tbody tr").live('click', function() {
%(click_callback)s
});
</script>
<table id="%(html_id)s">
<thead>
%(columns)s
</thead>
<tbody>
</tbody>
</table>
''' % data
return html
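# A minimal usage sketch (hypothetical names, not part of the original module):
# it assumes a SQLAlchemy session `db_session`, a mapped class `User` with
# `name`/`email` columns, and a Flask view registered as 'user_table_ajax'
# that returns UserTable().ajax() for the DataTables requests.
#
# class UserTable(Table):
#     db_table = User
#     db_session = db_session
#     source = 'user_table_ajax'
#     columns = [Column('Name', 'name'), Column('Email', 'email')]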
|
kivy/p4a-cloud
|
master/web/table.py
|
Python
|
mit
| 6,001
|
import sys
try:
import uerrno
try:
import uos_vfs as uos
open = uos.vfs_open
except ImportError:
import uos
except ImportError:
print("SKIP")
sys.exit()
try:
uos.VfsFat
except AttributeError:
print("SKIP")
sys.exit()
class RAMFS:
SEC_SIZE = 512
def __init__(self, blocks):
self.data = bytearray(blocks * self.SEC_SIZE)
def readblocks(self, n, buf):
#print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
for i in range(len(buf)):
buf[i] = self.data[n * self.SEC_SIZE + i]
def writeblocks(self, n, buf):
#print("writeblocks(%s, %x)" % (n, id(buf)))
for i in range(len(buf)):
self.data[n * self.SEC_SIZE + i] = buf[i]
def ioctl(self, op, arg):
#print("ioctl(%d, %r)" % (op, arg))
if op == 4: # BP_IOCTL_SEC_COUNT
return len(self.data) // self.SEC_SIZE
if op == 5: # BP_IOCTL_SEC_SIZE
return self.SEC_SIZE
try:
bdev = RAMFS(50)
except MemoryError:
print("SKIP")
sys.exit()
uos.VfsFat.mkfs(bdev)
vfs = uos.VfsFat(bdev)
uos.mount(vfs, '/ramdisk')
uos.chdir('/ramdisk')
try:
vfs.mkdir("foo_dir")
except OSError as e:
print(e.args[0] == uerrno.EEXIST)
try:
vfs.remove("foo_dir")
except OSError as e:
print(e.args[0] == uerrno.EISDIR)
try:
vfs.remove("no_file.txt")
except OSError as e:
print(e.args[0] == uerrno.ENOENT)
try:
vfs.rename("foo_dir", "/null/file")
except OSError as e:
print(e.args[0] == uerrno.ENOENT)
# file in dir
with open("foo_dir/file-in-dir.txt", "w+t") as f:
f.write("data in file")
with open("foo_dir/file-in-dir.txt", "r+b") as f:
print(f.read())
with open("foo_dir/sub_file.txt", "w") as f:
f.write("subdir file")
# directory not empty
try:
vfs.rmdir("foo_dir")
except OSError as e:
print(e.args[0] == uerrno.EACCES)
# trim full path
vfs.rename("foo_dir/file-in-dir.txt", "foo_dir/file.txt")
print(vfs.listdir("foo_dir"))
vfs.rename("foo_dir/file.txt", "moved-to-root.txt")
print(vfs.listdir())
# check that renaming to existing file will overwrite it
with open("temp", "w") as f:
f.write("new text")
vfs.rename("temp", "moved-to-root.txt")
print(vfs.listdir())
with open("moved-to-root.txt") as f:
print(f.read())
# valid removes
vfs.remove("foo_dir/sub_file.txt")
vfs.rmdir("foo_dir")
print(vfs.listdir())
# disk full
try:
bsize = vfs.statvfs("/ramdisk")[0]
free = vfs.statvfs("/ramdisk")[2] + 1
f = open("large_file.txt", "wb")
f.write(bytearray(bsize * free))
except OSError as e:
print("ENOSPC:", e.args[0] == 28) # uerrno.ENOSPC
|
Peetz0r/micropython-esp32
|
tests/extmod/vfs_fat_fileio2.py
|
Python
|
mit
| 2,661
|
# encoding: utf-8
from django.db import migrations, models
def forwards(apps, schema_editor):
    if schema_editor.connection.alias != 'default':
return
# Your migration code goes here
model = apps.get_model('board', 'Status')
model.objects.create(
name="Down",
slug="down",
image="cross-circle",
severity=40,
description="The service is currently down"
)
model.objects.create(
name="Up",
slug="up",
image="tick-circle",
severity=10,
description="The service is up"
)
model.objects.create(
name="Warning",
slug="warning",
image="exclamation",
severity=30,
description="The service is experiencing intermittent problems"
)
class Migration(migrations.Migration):
dependencies = [
('board', '0001_initial')
]
operations = [
migrations.RunPython(forwards),
]
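# Note: migrations.RunPython above is given no reverse_code, so this data
# migration cannot be unapplied (reversing it raises IrreversibleError).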
|
aksalj/whiskerboard
|
board/migrations/0002_initial_statuses.py
|
Python
|
mit
| 959
|
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '6d99ct', '6dgo9x', '6dnlrp', '6dt8k7', '6dzxrn', '6e762p', '6edj0c' ]
flaskport = 8890
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
comments += praw.helpers.flatten_tree(submission.comments)
return comments
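# Each comment is identified by the SHA-1 of its permalink plus its body; once
# a moderator acts on a comment, its hash is appended to retiredcommenthashes.txt
# and the comment is filtered out of future renders of the moderation page.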
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.permalink)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
foobarbazblarg/stayclean
|
stayclean-2017-june/serve-signups-with-flask.py
|
Python
|
mit
| 8,193
|
import pygame
from configuraciones import *
class limite(pygame.sprite.Sprite):
al = 800
an = 2
def __init__(self, cl = ROJO):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([self.al,self.an])
self.cl = cl
self.image.fill(self.cl)
self.rect= self.image.get_rect()
def setPos(self,x,y):
self.rect.x = x
self.rect.y = y
class limiteEnemigo(pygame.sprite.Sprite):
al = 2
an = 650
def __init__(self, cl = ROJO):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([self.al,self.an])
self.cl = cl
self.image.fill(self.cl)
self.rect= self.image.get_rect()
def setPos(self,x,y):
self.rect.x = x
self.rect.y = y
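# Minimal usage sketch (not part of the original module; ROJO comes from
# configuraciones via the star import above):
# borde = limite()
# borde.setPos(0, 0)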
|
Jofemago/Computacion-Grafica
|
SPACE INVADERS/Fronteras.py
|
Python
|
mit
| 785
|
from myhdl import block, always, Signal, modbv, concat, intbv, always_comb, instances
from hdmi.cores import control_token_0, control_token_1, control_token_2, control_token_3
INIT = 1
SEARCH = 2
BIT_SLIP = 4
RCVD_CTRL_TKN = 8 # Received control token
BLANK_PERIOD = 16
PHASE_ALIGNED = 32 # Phase Alignment Achieved
nSTATES = 6
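# The states above are one-hot encoded (a single bit per state), a common FPGA
# idiom that keeps the next-state decoding logic shallow.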
@block
def phase_aligner(reset, clock, s_data, bit_slip, flip_gear, phase_aligned):
"""
This impliments the phase alignment logic modelled after the phase aligner module from
xapp495 application notes.
Args:
reset: reset signal
clock: pixel clock
s_data: the 10-bit TMDS data
bit_slip: An input signal
flip_gear: An input signal
phase_aligned: denotes whether the phase is aligned
Returns:
myhdl.instances() : A list of myhdl instances.
"""
open_eye_counter_width = 3
ctrl_tkn_counter_width = 7
search_timer_width = 12
blank_period_counter_width = 1
received_ctrl_token, _received_ctrl_token, blank_begin = [Signal(False) for _ in range(3)]
@always(clock.posedge)
def assign_control():
received_ctrl_token.next = (s_data == control_token_0) or (s_data == control_token_1) \
or (s_data == control_token_2) or (s_data == control_token_3)
_received_ctrl_token.next = received_ctrl_token
blank_begin.next = not _received_ctrl_token and received_ctrl_token
ctrl_tkn_search_timer = Signal(modbv(0)[search_timer_width:])
ctrl_tkn_search_reset = Signal(False)
@always(clock.posedge)
def search_timer():
if ctrl_tkn_search_reset:
ctrl_tkn_search_timer.next = 0
else:
ctrl_tkn_search_timer.next = ctrl_tkn_search_timer + 1
ctrl_tkn_search_tout = Signal(False)
@always(clock.posedge)
def search_time_out():
        if ctrl_tkn_search_timer == (1 << search_timer_width) - 1:  # counter at its maximum value
ctrl_tkn_search_tout.next = True
else:
ctrl_tkn_search_tout.next = False
ctrl_tkn_event_timer = Signal(modbv(0)[ctrl_tkn_counter_width:])
ctrl_tkn_event_reset = Signal(False)
@always(clock.posedge)
def event_timer():
if ctrl_tkn_event_reset:
ctrl_tkn_event_timer.next = 0
else:
ctrl_tkn_event_timer.next = ctrl_tkn_event_timer + 1
ctrl_tkn_event_tout = Signal(False)
@always(clock.posedge)
def event_time_out():
        if ctrl_tkn_event_timer == (1 << ctrl_tkn_counter_width) - 1:  # counter at its maximum value
ctrl_tkn_event_tout.next = True
else:
ctrl_tkn_event_tout.next = False
# Below starts the phase alignment state machine
curr_state = Signal(intbv(1)[nSTATES:])
next_state = Signal(intbv(0)[nSTATES:])
@always(clock.posedge, reset.posedge)
def assign_state():
if reset:
curr_state.next = INIT
else:
curr_state.next = next_state
blank_period_counter = Signal(modbv(0)[blank_period_counter_width:])
@always_comb
def switch_state():
if curr_state == INIT:
next_state.next = SEARCH if ctrl_tkn_search_tout else INIT
elif curr_state == SEARCH:
if blank_begin:
next_state.next = RCVD_CTRL_TKN
else:
next_state.next = BIT_SLIP if ctrl_tkn_search_tout else SEARCH
elif curr_state == BIT_SLIP:
next_state.next = SEARCH
elif curr_state == RCVD_CTRL_TKN:
if received_ctrl_token:
next_state.next = BLANK_PERIOD if ctrl_tkn_event_tout else RCVD_CTRL_TKN
else:
next_state.next = SEARCH
elif curr_state == BLANK_PERIOD:
            if blank_period_counter == (1 << blank_period_counter_width) - 1:  # counter at its maximum value
next_state.next = PHASE_ALIGNED
else:
next_state.next = SEARCH
elif curr_state == PHASE_ALIGNED:
next_state.next = PHASE_ALIGNED
bit_slip_counter = Signal(modbv(0)[3:0])
@always(clock.posedge, reset.posedge)
def assign():
if reset:
phase_aligned.next = 0
bit_slip.next = 0
ctrl_tkn_search_reset.next = 1
ctrl_tkn_event_reset.next = 1
bit_slip_counter.next = 0
flip_gear.next = 0
blank_period_counter.next = 0
else:
if curr_state == INIT:
ctrl_tkn_search_reset.next = 0
ctrl_tkn_event_reset.next = 1
bit_slip.next = 0
bit_slip_counter.next = 0
if __debug__:
phase_aligned.next = 1
flip_gear.next = 0
blank_period_counter.next = 0
elif curr_state == SEARCH:
ctrl_tkn_search_reset.next = 0
ctrl_tkn_event_reset.next = 1
bit_slip.next = 0
phase_aligned.next = 0
if __debug__:
phase_aligned.next = 1
elif curr_state == BIT_SLIP:
ctrl_tkn_search_reset.next = 1
bit_slip.next = 1
bit_slip_counter.next = bit_slip_counter + 1
flip_gear.next = bit_slip_counter[2]
elif curr_state == RCVD_CTRL_TKN:
ctrl_tkn_search_reset.next = 0
ctrl_tkn_event_reset.next = 0
elif curr_state == BLANK_PERIOD:
blank_period_counter.next = blank_period_counter + 1
elif curr_state == PHASE_ALIGNED:
phase_aligned.next = 1
return instances()
|
srivatsan-ramesh/HDMI-Source-Sink-Modules
|
hdmi/cores/receiver/phase_aligner.py
|
Python
|
mit
| 5,642
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# tiff.py - tiff file parsing
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer
#
# First Edition: Thomas Schueppel <stain@acm.org>
# Maintainer: Dirk Meyer <dischi@freevo.org>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
import struct
import logging
# import kaa.metadata.image core
from . import core
from . import IPTC
# get logging object
log = logging.getLogger('metadata')
MOTOROLASIGNATURE = 'MM\x00\x2a'
INTELSIGNATURE = 'II\x2a\x00'
# http://partners.adobe.com/asn/developer/pdfs/tn/TIFF6.pdf
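# Each IFD entry is 12 bytes: tag (2 bytes), type (2), count (4) and a 4-byte
# value/offset field, which the unpack formats below split into two 16-bit
# halves so that short values can be read in place.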
class TIFF(core.Image):
table_mapping = { 'IPTC': IPTC.mapping }
def __init__(self,file):
core.Image.__init__(self)
self.iptc = None
self.mime = 'image/tiff'
self.type = 'TIFF image'
self.intel = 0
iptc = {}
header = file.read(8)
if header[:4] == MOTOROLASIGNATURE:
self.intel = 0
(offset,) = struct.unpack(">I", header[4:8])
file.seek(offset)
(len,) = struct.unpack(">H", file.read(2))
app = file.read(len*12)
for i in range(len):
(tag, type, length, value, offset) = \
struct.unpack('>HHIHH', app[i*12:i*12+12])
if tag == 0x8649:
file.seek(offset,0)
iptc = IPTC.parseiptc(file.read(1000))
elif tag == 0x0100:
if value != 0:
self.width = value
else:
self.width = offset
elif tag == 0x0101:
if value != 0:
self.height = value
else:
self.height = offset
elif header[:4] == INTELSIGNATURE:
self.intel = 1
(offset,) = struct.unpack("<I", header[4:8])
file.seek(offset,0)
(len,) = struct.unpack("<H", file.read(2))
app = file.read(len*12)
for i in range(len):
(tag, type, length, offset, value) = \
struct.unpack('<HHIHH', app[i*12:i*12+12])
if tag == 0x8649:
file.seek(offset)
iptc = IPTC.parseiptc(file.read(1000))
elif tag == 0x0100:
if value != 0:
self.width = value
else:
self.width = offset
elif tag == 0x0101:
if value != 0:
self.height = value
else:
self.height = offset
else:
raise core.ParseError()
if iptc:
self._appendtable('IPTC', iptc)
Parser = TIFF
|
jtackaberry/stagehand
|
external/metadata/image/tiff.py
|
Python
|
mit
| 3,914
|
#!/usr/bin/python
# filename: celeryconfig.py
###########################################################################
#
# Copyright (c) 2014 Bryan Briney. All rights reserved.
#
# @version: 1.0.0
# @author: Bryan Briney
# @license: MIT (http://opensource.org/licenses/MIT)
#
###########################################################################
# config file for Celery Daemon
# RabbitMQ broker
broker_url = 'pyamqp://abcloud:abcloud@master:5672/abcloud_host'
# Redis broker
# BROKER_URL = 'redis://master:6379/0'
# RabbitMQ backend
result_backend = 'rpc://abcloud:abcloud@master:5672/abcloud_host'
# Redis backend
# CELERY_RESULT_BACKEND = 'redis://master:6379/0'
# Additional Redis-specific configs
broker_transport_options = {'fanout_prefix': True,
'fanout_patterns': True,
'visibility_timeout': 3600}
# Other configs
worker_max_tasks_per_child = 320
worker_prefetch_multiplier = 1
task_acks_late = True
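# A minimal sketch of how a Celery app might load this module (the dotted
# path matches this file's location in the repo):
# from celery import Celery
# app = Celery('abstar')
# app.config_from_object('abstar.celeryconfig')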
|
briney/abstar
|
abstar/celeryconfig.py
|
Python
|
mit
| 985
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Jul 3, 2014
@author: anroco
How to know if an element exists within a tuple in Python?
'''
#create a tuple
tupla = ('a', 'b', 'c', 'd', 'e')
print (tupla)
#use the in statement
print('c' in tupla)
print(2 in tupla)
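#the not in statement checks the opposite
print('z' not in tupla)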
|
OxPython/Python_tuples_exists
|
src/exists_item_tuples.py
|
Python
|
epl-1.0
| 344
|
from django.shortcuts import render
from django.http import HttpResponse
from article.models import Article
from datetime import datetime
# Create your views here.
def home(request):
post_list = Article.objects.all()
return render(request, 'home.html', {'post_list' : post_list})
def detail(request, my_args):
post = Article.objects.all()[int(my_args)]
return render(request,'details.html',{'post':post,'page':(post.id-1)})
#return HttpResponse("You are looking at my_args %s ." % str)
|
Moon84/my_blog
|
article/views.py
|
Python
|
epl-1.0
| 493
|
#!/usr/bin/python
import subprocess, re
def get_cpu_info():
command = "cat /proc/cpuinfo"
all_info = subprocess.check_output(command, shell=True).strip()
for line in all_info.split("\n"):
if "model name" in line:
model_name = re.sub(".*model name.*:", "", line,1).strip()
return model_name.replace("(R)","").replace("(TM)", "")
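if __name__ == '__main__':
    # Minimal usage sketch (not in the original): print the parsed model name.
    print(get_cpu_info())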
|
JeffsanC/uavs
|
src/rpg_vikit/vikit_py/src/vikit_py/cpu_info.py
|
Python
|
gpl-2.0
| 340
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
TO DO:
- Save the options that were used previously
- Build a database of proteins, with their molecular weight and number of
residues, to quickly calculate the correction factor.
"""
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import glib
import os
import gettext
from def_config import *
from selection_page import SelectionPage
from save_options_page import SaveOptionsPage
from summary_page import SummaryPage
from process_form import ProcessForm
from options_page import OptionsPage
from tools import ask_yesno, ProcessOptions, _
from jws_processing import *
from jws_filechooser import ff_txt
class JwsAssistant(gtk.Assistant):
def __init__(self):
self.blank_msg = \
_("""The selected blank is among the files to be processed.
Do you really want to continue?""")
self.heterogeneus_xvalues_msg = \
_("""No every file has the same data pitch o data does
not always begin at the same wavelength. Do you want to
save data with a column for X values for each spectrum?
""")
self.intro_msg = \
_("""Welcome to jwsProcessor.
This assistant consists on four steps:
#1 Select the .jws files to be processed
#2 Choose the processing options
#3 Process of the files
#4 Save the results
""")
# Set Internal variables
self.options = ProcessOptions()
self.spc_list = []
self.processed_files = []
self.processed_list = []
self.success_count = 0
self.current_folder = config.get(CFGFILE_SECTION, LAST_DIR_STR)
self.__no_files_processed = False
self._create_widgets()
def _create_widgets(self):
gtk.Assistant.__init__(self)
# Page 0
ip = gtk.Label(self.intro_msg)
self.append_page(ip)
self.set_page_type(ip, gtk.ASSISTANT_PAGE_INTRO)
self.set_page_title(ip, _("jwsProcessor assistant"))
self.set_page_complete(ip, True)
self.intro_page = ip
# Page 1
sp = SelectionPage(self, self.current_folder)
self.append_page(sp)
self.set_page_type(sp, gtk.ASSISTANT_PAGE_CONTENT)
self.set_page_title(sp, _("#1 - Select spectra"))
self.set_page_complete(sp, False)
self.selection_page = sp
# Page 2
op = OptionsPage(options=self.options,
current_folder = sp.current_folder,
assistant=self)
self.append_page(op)
self.set_page_type(op, gtk.ASSISTANT_PAGE_CONFIRM)
self.set_page_title(op, _("#2 - Processing options"))
self.set_page_complete(op, False)
self.options_page = op
# Page 3
fp = SummaryPage(self)
self.append_page(fp)
self.set_page_title(fp, _("#3 - Results"))
self.set_page_complete(fp, False)
self.summary_page = fp
# Page 4
sop = SaveOptionsPage(self)
self.append_page(sop)
self.set_page_type(sop, gtk.ASSISTANT_PAGE_CONFIRM)
self.set_page_title(sop, _("#4 - Save results as..."))
self.set_page_complete(sop, True)
self.save_options_page = sop
# Page 5
fp = gtk.Label(_("Saving files..."))
self.append_page(fp)
self.set_page_type(fp, gtk.ASSISTANT_PAGE_SUMMARY)
self.set_page_title(fp, _("Summary"))
self.set_page_complete(fp, False)
self.final_page = fp
# Restart assistant button
self.restart_button = gtk.Button(_("Start again"))
self.restart_button.set_image(gtk.image_new_from_stock(gtk.STOCK_GOTO_FIRST,
gtk.ICON_SIZE_BUTTON) )
self.restart_button.connect("clicked", self._restart_clicked_cb)
self.restart_button.show_all()
# Configure window
self.set_title("jwsProcessor")
self.set_border_width(12)
self.set_position (gtk.WIN_POS_CENTER)
self.set_forward_page_func(self._fwd_page_func)
self.show_all()
self.connect("delete-event", self._close_cb)
self.connect("prepare", self._prepare_cb)
self.connect("apply", self._apply_cb)
self.connect("close", self._close_cb)
self.connect("cancel", self._close_cb)
def _close_cb(self, widget, event=None):
config.set(CFGFILE_SECTION, LAST_DIR_STR, self.current_folder)
gtk.main_quit()
def _restart_clicked_cb(self, widget):
self.selection_page.reset()
self.remove_action_widget(self.restart_button)
self.set_current_page(0)
def _apply_cb(self, widget):
""" Este callback es llamado cuando el usuario ha pulsado el botón
"Aplicar" en una página de tipo ASSISTANT_PAGE_CONFIRM, pero siempre
una vez que se ha mostrado la página siguiente a dicha página.
En este wizzard tenemos 2 páginas de confirmación, la página 2 en la
que se seleccionan las opciones de procesado y la página 4 en la que
se seleccionan las opciones de guardado. Por tanto en esta página se
llevarán a cabo:
#1 El procesado de los datos (cuando página actual == 3)
#2 El guardado de los datos procesados (cuando la página actual==5)
"""
page = self.get_nth_page(self.get_current_page())
print "apply_cb, current_page=", page #debug
#if page == self.summary_page:
if page == self.options_page:
            # Check whether the blank is included in the spectra list;
            # if it is, allow the user to re-run the options dialog
self.options = self.options_page.get_options()
if self.options.subtract_blank:
if self.options.blank_fn in self.selection_page.spectra_db:
if ask_yesno(title=self.blank_msg, parent=self)!=gtk.RESPONSE_YES:
self.set_current_page(2)
return
# Process spectra
self.spc_list = self.selection_page.get_ordered_data()
process_form = ProcessForm()
processor = JwsProcessor(process_form)
results = processor.process_files(options=self.options,
spectra_list=self.spc_list)
if results:
(self.processed_files, self.processed_list, self.success_count) = results
else:
self.success_count = 0
# Show the results in self.summary_page
info_buffer = processor.report_buffer
process_form.destroy()
if self.success_count > 0:
mensaje = _('Successfully processed files: %(#)d out of %(##)d.') \
% {'#': self.success_count, '##':len(self.spc_list)}
image_type = gtk.STOCK_DIALOG_INFO
else:
mensaje = _("Spectra could not be processed!")
image_type = gtk.STOCK_DIALOG_ERROR
self.__no_files_processed = True
self.summary_page.set_message(mensaje, info_buffer, image_type)
self.set_page_complete(self.summary_page, True)
#elif page == self.final_page:
elif page == self.save_options_page:
# Save files
print self.save_options_page.get_option() #debug
save_option = self.save_options_page.get_option()
column_headers_option = self.save_options_page.get_column_headers_option()
if save_option != self.save_options_page.SO_DONT_SAVE:
files_saved = self._save_files( save_option,
column_headers_option,
self.processed_list,
self.processed_files,
self.success_count)
print "Files saved = ", files_saved #debug
if files_saved is None:
self.set_current_page(4)
else:
msg = _("Successfully saved files: %(#)d out of %(##)d") % \
{'#':files_saved, '##':len(self.processed_list)}
self.final_page.set_text(msg)
else:
self.final_page.set_text(_("Results have not been saved."))
def _fwd_page_func(self, current_page):
return_val = current_page+1
page = self.get_nth_page(current_page)
if page == self.summary_page:
if self.__no_files_processed:
return_val = 5
return return_val
def _prepare_cb(self, assistant, page):
if page == self.options_page:
self.current_folder = self.selection_page.current_folder
self.options_page.current_folder = self.selection_page.current_folder
self.options_page.check_blank_options()
elif page == self.summary_page:
self.current_folder = self.options_page.current_folder
elif page == self.save_options_page:
pass
elif page == self.final_page:
if self.__no_files_processed:
self.final_page.set_text(_("No file could be processed."))
self.add_action_widget(self.restart_button)
def _save_files(self, save_option, write_column_headers, processed_list,
processed_files, success_count):
"""
        Parameters:
        - save_option: how the spectra should be saved
        - write_column_headers: whether or not to include the name of each
        spectrum in the file
        - processed_list, processed_files: lists with the processed data and
        with the names of the original files, respectively
        - success_count: the number of files that should be saved.
        Returns the number of spectra saved, or None if the spectra were not
        saved because of a user decision.
        If None is returned, the user should be asked again whether they want
        to save the results.
"""
        # 1. Have the user choose which file or which folder the results
        # should be saved to
if (save_option==self.save_options_page.SO_ONEFILE_ONEX) or \
(save_option==self.save_options_page.SO_ONEFILE_MULTIPLEX):
continuar = True
while continuar:
output_fn = self._choose_file_to_save()
continuar = False
                # Exit if the user declines to choose a file:
if not output_fn:
return None
                # Check whether the file exists; ask whether to overwrite it:
elif os.path.exists(output_fn):
mensaje=_("The file %s already exist, do you want to overwrite it?") \
% os.path.split(output_fn)[1]
if ask_yesno( title=mensaje ) != gtk.RESPONSE_YES:
continuar = True
self.current_folder = os.path.split(output_fn)[0] ## update current folder!!
elif (save_option==self.save_options_page.SO_MULTIPLE_FILES):
output_folder = self._choose_folder_to_save()
            # Exit if the user declines to choose a folder:
if not output_folder:
return None
self.current_folder = output_folder ## update current folder!!
        # 2. Save the results according to the option chosen by the user
if (save_option==self.save_options_page.SO_ONEFILE_ONEX):
            # Check whether all the files share the same x axis...
results = check_homogeneity(processed_files)
if results:
(startx, xpitch) = results
save_to_one_file_onex(processed_files, processed_list,
startx, xpitch,
output_fn, write_column_headers)
                # Assume that all spectra were saved:
return_value = success_count
            # ... if they don't, the user may want to save the x-axis data
            # of each spectrum:
else:
                if ask_yesno(title=self.heterogeneus_xvalues_msg) == gtk.RESPONSE_YES:
save_to_one_file_multiplex(processed_files, processed_list,
output_fn)
                    # Assume that all spectra were saved:
return_value = success_count
else:
return_value = None
elif (save_option==self.save_options_page.SO_ONEFILE_MULTIPLEX):
save_to_one_file_multiplex(processed_files, processed_list,
output_fn)
            # Assume that all spectra were saved:
return_value = success_count
elif (save_option==self.save_options_page.SO_MULTIPLE_FILES):
return_value = save_to_separate_files(processed_files, output_folder)
else:
return_value = None
return return_value
def _choose_file_to_save(self):
return_value = None
buttons = ( gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK )
fs_dialog = gtk.FileChooserDialog( title=_("Save results as..."),
action= gtk.FILE_CHOOSER_ACTION_SAVE,
buttons= buttons)
fs_dialog.add_filter(ff_txt)
fs_dialog.set_select_multiple(False)
fs_dialog.set_current_folder(self.selection_page.current_folder)
response = fs_dialog.run()
if response == gtk.RESPONSE_OK:
fn = fs_dialog.get_filename().decode('utf-8')
self.selection_page.current_folder = fs_dialog.get_current_folder().decode('utf-8')
return_value = fn
fs_dialog.destroy()
return return_value
def _choose_folder_to_save(self):
return_value = None
buttons = ( gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK )
fs_dialog = gtk.FileChooserDialog( title=_("Select a folder"),
action= gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons= buttons )
fs_dialog.set_select_multiple(False)
fs_dialog.set_current_folder(self.selection_page.current_folder)
response = fs_dialog.run()
if response == gtk.RESPONSE_OK:
folder = fs_dialog.get_filename().decode('utf-8')
self.selection_page.current_folder = fs_dialog.get_current_folder().decode('utf-8')
return_value = folder
fs_dialog.destroy()
return return_value
def main(debug=False):
localedir = localepath()
gettext.bindtextdomain('jwsprocessor', localedir)
gettext.textdomain('jwsprocessor')
cfgFile = config_load_validate()
# Load the icon for the window; here we just load one, default icon
try:
gtk.window_set_default_icon_from_file( imagepath("jwsprocessor.svg") )
except glib.GError:
gtk.window_set_default_icon_from_file( imagepath("jwsprocessor.png") )
app_window = JwsAssistant()
gtk.main()
config_save(cfgFile)
if debug:
printConfigParser() ##debug
if __name__=="__main__":
main()
|
vhernandez/jwsProcessor
|
src/jwsprocessor/main.py
|
Python
|
gpl-2.0
| 15,739
|
# Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Connect to Bitcoin server via JSON-RPC.
"""
from reversecoin.bitcoinrpc.proxy import AuthServiceProxy
from reversecoin.bitcoinrpc.exceptions import (wrap_exception, BitcoinException,
WalletPassphraseIncorrect,
WalletAlreadyUnlocked)
from reversecoin.bitcoinrpc.data import (ServerInfo, AccountInfo, AddressInfo, TransactionInfo, AddressValidation, WorkItem, MiningInfo)
class BitcoinConnection(object):
"""
A BitcoinConnection object defines a connection to a bitcoin server.
It is a thin wrapper around a JSON-RPC API connection.
Arguments to constructor:
- *user* -- Authenticate as user.
- *password* -- Authentication password.
- *host* -- Bitcoin JSON-RPC host.
- *port* -- Bitcoin JSON-RPC port.
"""
def __init__(self, user, password, host='localhost', port=8332,
use_https=False):
"""
Create a new bitcoin server connection.
"""
url = 'http{s}://{user}:{password}@{host}:{port}/'.format(
s='s' if use_https else '',
user=user, password=password, host=host, port=port)
self.url = url
self.proxy = AuthServiceProxy(url, exception_wrapper=wrap_exception)
def stop(self):
"""
Stop bitcoin server.
"""
self.proxy.stop()
def getblock(self, hash):
"""
Returns information about the given block hash.
"""
return self.proxy.getblock(hash)
def getblockcount(self):
"""
Returns the number of blocks in the longest block chain.
"""
return self.proxy.getblockcount()
def getblockhash(self, index):
"""
Returns hash of block in best-block-chain at index.
:param index: index of the block
"""
return self.proxy.getblockhash(index)
def getblocknumber(self):
"""
Returns the block number of the latest block in the longest block chain.
Deprecated. Use getblockcount instead.
"""
return self.getblockcount()
def getconnectioncount(self):
"""
Returns the number of connections to other nodes.
"""
return self.proxy.getconnectioncount()
def getdifficulty(self):
"""
Returns the proof-of-work difficulty as a multiple of the minimum difficulty.
"""
return self.proxy.getdifficulty()
def getgenerate(self):
"""
Returns :const:`True` or :const:`False`, depending on whether generation is enabled.
"""
return self.proxy.getgenerate()
def setgenerate(self, generate, genproclimit=None):
"""
Enable or disable generation (mining) of coins.
Arguments:
- *generate* -- :const:`True` or :const:`False` to turn generation on or off.
- *genproclimit* -- Number of processors that are used for generation, -1 is unlimited.
"""
if genproclimit is None:
return self.proxy.setgenerate(generate)
else:
return self.proxy.setgenerate(generate, genproclimit)
def gethashespersec(self):
"""
Returns a recent hashes per second performance measurement while generating.
"""
return self.proxy.gethashespersec()
def getinfo(self):
"""
Returns an :class:`~bitcoinrpc.data.ServerInfo` object containing various state info.
"""
return ServerInfo(**self.proxy.getinfo())
def getmininginfo(self):
"""
Returns an :class:`~bitcoinrpc.data.MiningInfo` object containing various
mining state info.
"""
return MiningInfo(**self.proxy.getmininginfo())
def getnewaddress(self, account=None):
"""
Returns a new bitcoin address for receiving payments.
Arguments:
- *account* -- If account is specified (recommended), it is added to the address book
so that payments received with the address will be credited to it.
"""
if account is None:
return self.proxy.getnewaddress()
else:
return self.proxy.getnewaddress(account)
def getaccountaddress(self, account):
"""
Returns the current bitcoin address for receiving payments to an account.
Arguments:
- *account* -- Account for which the address should be returned.
"""
return self.proxy.getaccountaddress(account)
def setaccount(self, bitcoinaddress, account):
"""
Sets the account associated with the given address.
Arguments:
- *bitcoinaddress* -- Bitcoin address to associate.
- *account* -- Account to associate the address to.
"""
return self.proxy.setaccount(bitcoinaddress, account)
def getaccount(self, bitcoinaddress):
"""
Returns the account associated with the given address.
Arguments:
- *bitcoinaddress* -- Bitcoin address to get account for.
"""
return self.proxy.getaccount(bitcoinaddress)
def getvault(self):
"""
Returns the wallet's vault.
"""
return self.proxy.getvault()
def getvaults(self):
"""
Returns the vaults in wallet.
"""
return self.proxy.getvaults()
def getpendingtransactions(self):
"""
Returns a list of pending vault transactions.
"""
return self.proxy.getpendingtransactions()
def getaddressesbyaccount(self, account):
"""
Returns the list of addresses for the given account.
Arguments:
- *account* -- Account to get list of addresses for.
"""
return self.proxy.getaddressesbyaccount(account)
def newvault(self, toaddress, tomaster_address, timeout, maxfees):
"""
Creates and returns a new vault.
Arguments:
- *toaddress* -- Bitcoin address to send to.
- *tomaster_address* -- Bitcoin master address to send to.
- *timeout* -- Timeout for vault (positive integer, less than or equal to 100).
- *maxfees* -- Max fees (float, rounded to the nearest 0.00000001).
"""
return self.proxy.newvault(toaddress, tomaster_address, timeout, maxfees)
def sendtoaddress(self, bitcoinaddress, amount, comment=None, comment_to=None):
"""
Sends *amount* from the server's available balance to *bitcoinaddress*.
Arguments:
- *bitcoinaddress* -- Bitcoin address to send to.
- *amount* -- Amount to send (float, rounded to the nearest 0.00000001).
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.sendtoaddress(bitcoinaddress, amount)
elif comment_to is None:
return self.proxy.sendtoaddress(bitcoinaddress, amount, comment)
else:
return self.proxy.sendtoaddress(bitcoinaddress, amount, comment, comment_to)
def sendtovault(self, vault_address, amount, comment=None, comment_to=None):
"""
Sends *amount* from the server's available balance to *vault_address*.
Arguments:
- *vault_address* -- Vault address to send to.
- *amount* -- Amount to send (float, rounded to the nearest 0.00000001).
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.sendtovault(vault_address, amount)
elif comment_to is None:
return self.proxy.sendtovault(vault_address, amount, comment)
else:
return self.proxy.sendtovault(vault_address, amount, comment, comment_to)
def fastwithdrawfromvault(self, fromaddress, toaddress, amount, comment=None, comment_to=None):
"""
Withdraws *amount* from the vault *fromaddress* to the specified *toaddress*.
Arguments:
- *fromaddress* -- Bitcoin vault address to send from.
- *toaddress* -- Bitcoin address to send to.
- *amount* -- Amount to send (float, rounded to the nearest 0.00000001).
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.fastwithdrawfromvault(fromaddress, toaddress, amount)
elif comment_to is None:
return self.proxy.fastwithdrawfromvault(fromaddress, toaddress, amount, comment)
else:
return self.proxy.fastwithdrawfromvault(fromaddress, toaddress, amount, comment, comment_to)
def withdrawfromvault(self, fromaddress, toaddress, amount, comment=None, comment_to=None):
"""
Withdraws *amount* from the vault *fromaddress* to the specified *toaddress*.
Arguments:
- *fromaddress* -- Bitcoin vault address to send from.
- *toaddress* -- Bitcoin address to send to.
- *amount* -- Amount to send (float, rounded to the nearest 0.00000001).
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.withdrawfromvault(fromaddress, toaddress, amount)
elif comment_to is None:
return self.proxy.withdrawfromvault(fromaddress, toaddress, amount, comment)
else:
return self.proxy.withdrawfromvault(fromaddress, toaddress, amount, comment, comment_to)
def overridevaulttx(self, fromvault, toaddress, comment=None, comment_to=None):
"""
Overrides the pending transaction of vault *fromvault*, sending the funds to *toaddress*.
Arguments:
- *fromvault* -- Bitcoin vault address to send from.
- *toaddress* -- Bitcoin address to send to.
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.overridevaulttx(fromvault, toaddress)
elif comment_to is None:
return self.proxy.overridevaulttx(fromvault, toaddress, comment)
else:
return self.proxy.overridevaulttx(fromvault, toaddress, comment, comment_to)
def getreceivedbyaddress(self, bitcoinaddress, minconf=1):
"""
Returns the total amount received by a bitcoin address in transactions with at least a
certain number of confirmations.
Arguments:
- *bitcoinaddress* -- Address to query for total amount.
- *minconf* -- Number of confirmations to require, defaults to 1.
"""
return self.proxy.getreceivedbyaddress(bitcoinaddress, minconf)
def getreceivedbyaccount(self, account, minconf=1):
"""
Returns the total amount received by addresses with an account in transactions with
at least a certain number of confirmations.
Arguments:
- *account* -- Account to query for total amount.
- *minconf* -- Number of confirmations to require, defaults to 1.
"""
return self.proxy.getreceivedbyaccount(account, minconf)
def gettransaction(self, txid):
"""
Get detailed information about a transaction.
Arguments:
- *txid* -- Transaction id for which the info should be returned.
"""
return TransactionInfo(**self.proxy.gettransaction(txid))
def getrawtransaction(self, txid, verbose=True):
"""
Get raw transaction info.
Arguments:
- *txid* -- Transaction id for which the info should be returned.
- *verbose* -- If False, return only the "hex" of the transaction.
"""
if verbose:
return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))
return self.proxy.getrawtransaction(txid, 0)
def gettxout(self, txid, index, mempool=True):
"""
Returns details about an unspent transaction output (UTXO).
Arguments:
- *txid* -- Transaction id for which the info should be returned.
- *index* -- The output index.
- *mempool* -- Add memory pool transactions.
"""
tx = self.proxy.gettxout(txid, index, mempool)
if tx is not None:
return TransactionInfo(**tx)
else:
return TransactionInfo()
def createrawtransaction(self, inputs, outputs):
"""
Creates a raw transaction spending given inputs
(a list of dictionaries, each containing a transaction id and an output number),
sending to given address(es).
Returns hex-encoded raw transaction.
Example usage:
>>> conn.createrawtransaction(
[{"txid": "a9d4599e15b53f3eb531608ddb31f48c695c3d0b3538a6bda871e8b34f2f430c",
"vout": 0}],
{"mkZBYBiq6DNoQEKakpMJegyDbw2YiNQnHT":50})
Arguments:
- *inputs* -- A list of {"txid": txid, "vout": n} dictionaries.
- *outputs* -- A dictionary mapping (public) addresses to the amount
they are to be paid.
"""
return self.proxy.createrawtransaction(inputs, outputs)
def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None):
"""
Sign inputs for raw transaction (serialized, hex-encoded).
Returns a dictionary with the keys:
"hex": raw transaction with signature(s) (hex-encoded string)
"complete": 1 if transaction has a complete set of signature(s), 0 if not
Arguments:
- *hexstring* -- A hex string of the transaction to sign.
- *previous_transactions* -- A (possibly empty) list of dictionaries of the form:
{"txid": txid, "vout": n, "scriptPubKey": hex, "redeemScript": hex}, representing
previous transaction outputs that this transaction depends on but may not yet be
in the block chain.
- *private_keys* -- A (possibly empty) list of base58-encoded private
keys that, if given, will be the only keys used to sign the transaction.
"""
return dict(self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys))
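# A sketch of the raw-transaction flow described above (conn is a
# BitcoinConnection instance; the inputs/outputs values are placeholders, and
# sendrawtransaction is assumed to be exposed by the node via the raw proxy):
#   raw = conn.createrawtransaction(inputs, outputs)
#   signed = conn.signrawtransaction(raw)
#   if signed['complete']:
#       txid = conn.proxy.sendrawtransaction(signed['hex'])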
def decoderawtransaction(self, hexstring):
"""
Produces a human-readable JSON object for a raw transaction.
Arguments:
- *hexstring* -- A hex string of the transaction to be decoded.
"""
return dict(self.proxy.decoderawtransaction(hexstring))
def listsinceblock(self, block_hash):
res = self.proxy.listsinceblock(block_hash)
res['transactions'] = [TransactionInfo(**x) for x in res['transactions']]
return res
def listreceivedbyaddress(self, minconf=1, includeempty=False):
"""
Returns a list of addresses.
Each address is represented with a :class:`~bitcoinrpc.data.AddressInfo` object.
Arguments:
- *minconf* -- Minimum number of confirmations before payments are included.
- *includeempty* -- Whether to include addresses that haven't received any payments.
"""
return [AddressInfo(**x) for x in
self.proxy.listreceivedbyaddress(minconf, includeempty)]
def listaccounts(self, minconf=1, as_dict=False):
"""
Returns a list of account names.
Arguments:
- *minconf* -- Minimum number of confirmations before payments are included.
- *as_dict* -- Returns a dictionary of account names, with their balance as values.
"""
if as_dict:
return dict(self.proxy.listaccounts(minconf))
else:
return self.proxy.listaccounts(minconf).keys()
def listreceivedbyaccount(self, minconf=1, includeempty=False):
"""
Returns a list of accounts.
Each account is represented with a :class:`~bitcoinrpc.data.AccountInfo` object.
Arguments:
- *minconf* -- Minimum number of confirmations before payments are included.
- *includeempty* -- Whether to include addresses that haven't received any payments.
"""
return [AccountInfo(**x) for x in
self.proxy.listreceivedbyaccount(minconf, includeempty)]
def listtransactions(self, account=None, count=10, from_=0, address=None):
"""
Returns a list of the last transactions for an account.
Each transaction is represented with a :class:`~bitcoinrpc.data.TransactionInfo` object.
Arguments:
- *account* -- Account to list transactions from. Return transactions from
all accounts if None.
- *count* -- Number of transactions to return.
- *from_* -- Skip the first <from_> transactions.
- *address* -- Receive address to consider
"""
accounts = [account] if account is not None else self.listaccounts(as_dict=True).keys()
return [TransactionInfo(**tx) for acc in accounts for
tx in self.proxy.listtransactions(acc, count, from_) if
address is None or tx["address"] == address]
def backupwallet(self, destination):
"""
Safely copies ``wallet.dat`` to *destination*, which can be a directory or a path
with filename.
Arguments:
- *destination* -- directory or path with filename to backup wallet to.
"""
return self.proxy.backupwallet(destination)
def validateaddress(self, validateaddress):
"""
Validate a bitcoin address and return information for it.
The information is represented by a :class:`~bitcoinrpc.data.AddressValidation` object.
Arguments:
- *validateaddress* -- Address to validate.
"""
return AddressValidation(**self.proxy.validateaddress(validateaddress))
def getbalance(self, account=None, minconf=None):
"""
Get the current balance, either for an account or the total server balance.
Arguments:
- *account* -- If this parameter is specified, returns the balance in the account.
- *minconf* -- Minimum number of confirmations required for transferred balance.
"""
args = []
if account is not None:
args.append(account)
if minconf is not None:
args.append(minconf)
return self.proxy.getbalance(*args)
def move(self, fromaccount, toaccount, amount, minconf=1, comment=None):
"""
Move from one account in your wallet to another.
Arguments:
- *fromaccount* -- Source account name.
- *toaccount* -- Destination account name.
- *amount* -- Amount to transfer.
- *minconf* -- Minimum number of confirmations required for transferred balance.
- *comment* -- Comment to add to transaction log.
"""
if comment is None:
return self.proxy.move(fromaccount, toaccount, amount, minconf)
else:
return self.proxy.move(fromaccount, toaccount, amount, minconf, comment)
def sendfrom(self, fromaccount, tobitcoinaddress, amount, minconf=1, comment=None,
comment_to=None):
"""
Sends amount from account's balance to bitcoinaddress. This method will fail
if the account's balance holds fewer than amount bitcoins with minconf
confirmations (unless account is the empty-string-named default account,
which behaves like the sendtoaddress method). Returns transaction ID on success.
Arguments:
- *fromaccount* -- Account to send from.
- *tobitcoinaddress* -- Bitcoin address to send to.
- *amount* -- Amount to send (float, rounded to the nearest 0.00000001).
- *minconf* -- Minimum number of confirmations required for transferred balance.
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.sendfrom(fromaccount, tobitcoinaddress, amount, minconf)
elif comment_to is None:
return self.proxy.sendfrom(fromaccount, tobitcoinaddress, amount, minconf, comment)
else:
return self.proxy.sendfrom(fromaccount, tobitcoinaddress, amount, minconf,
comment, comment_to)
def sendmany(self, fromaccount, todict, minconf=1, comment=None):
"""
Sends specified amounts from account's balance to bitcoinaddresses. This method will fail
if the account's balance holds less than the total amount with minconf confirmations
(unless account is the empty-string-named default account). Returns transaction ID
on success.
Arguments:
- *fromaccount* -- Account to send from.
- *todict* -- Dictionary with Bitcoin addresses as keys and amounts as values.
- *minconf* -- Minimum number of confirmations required for transferred balance.
- *comment* -- Comment for transaction.
"""
if comment is None:
return self.proxy.sendmany(fromaccount, todict, minconf)
else:
return self.proxy.sendmany(fromaccount, todict, minconf, comment)
def verifymessage(self, bitcoinaddress, signature, message):
"""
Verifies a signature given the bitcoinaddress used to sign,
the signature itself, and the message that was signed.
Returns :const:`True` if the signature is valid, and :const:`False` if it is invalid.
Arguments:
- *bitcoinaddress* -- the bitcoinaddress used to sign the message
- *signature* -- the signature to be verified
- *message* -- the message that was originally signed
"""
return self.proxy.verifymessage(bitcoinaddress, signature, message)
def getwork(self, data=None):
"""
Get work for remote mining, or submit result.
If data is specified, the server tries to solve the block
using the provided data and returns :const:`True` if it was successful.
If not, the function returns formatted hash data (:class:`~bitcoinrpc.data.WorkItem`)
to work on.
Arguments:
- *data* -- Result from remote mining.
"""
if data is None:
# Only when no data is provided does getwork return a WorkItem
return WorkItem(**self.proxy.getwork())
else:
return self.proxy.getwork(data)
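# A sketch of both getwork modes described in the docstring (conn is a
# BitcoinConnection instance):
#   work = conn.getwork()        # no data: returns a WorkItem to mine on
#   solved = conn.getwork(data)  # with data: True if the block was solved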
def listunspent(self, minconf=1, maxconf=999999):
"""
Returns a list of unspent transaction inputs in the wallet.
Arguments:
- *minconf* -- Minimum number of confirmations required to be listed.
- *maxconf* -- Maximum number of confirmations allowed to be listed.
"""
return [TransactionInfo(**tx) for tx in
self.proxy.listunspent(minconf, maxconf)]
def keypoolrefill(self):
"Fills the keypool, requires wallet passphrase to be set."
self.proxy.keypoolrefill()
def walletpassphrase(self, passphrase, timeout, dont_raise=False):
"""
Stores the wallet decryption key in memory for <timeout> seconds.
- *passphrase* -- The wallet passphrase.
- *timeout* -- Time in seconds to keep the wallet unlocked
(by keeping the passphrase in memory).
- *dont_raise* -- if True, return False instead of raising
`~bitcoinrpc.exceptions.WalletPassphraseIncorrect`.
"""
try:
self.proxy.walletpassphrase(passphrase, timeout)
return True
except BitcoinException as exception:
if dont_raise:
if isinstance(exception, WalletPassphraseIncorrect):
return False
elif isinstance(exception, WalletAlreadyUnlocked):
return True
raise exception
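# Usage sketch (conn is a BitcoinConnection instance): unlock the wallet for
# 60 seconds, getting False back on a wrong passphrase instead of an exception:
#   unlocked = conn.walletpassphrase('my passphrase', 60, dont_raise=True)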
def walletlock(self):
"""
Removes the wallet encryption key from memory, locking the wallet.
After calling this method, you will need to call walletpassphrase
again before being able to call any methods which require the wallet
to be unlocked.
"""
return self.proxy.walletlock()
def walletpassphrasechange(self, oldpassphrase, newpassphrase, dont_raise=False):
"""
Changes the wallet passphrase from <oldpassphrase> to <newpassphrase>.
Arguments:
- *dont_raise* -- if True, return False instead of raising
`~bitcoinrpc.exceptions.WalletPassphraseIncorrect`.
"""
try:
self.proxy.walletpassphrasechange(oldpassphrase, newpassphrase)
return True
except BitcoinException as exception:
if dont_raise and isinstance(exception, WalletPassphraseIncorrect):
return False
raise exception
def dumpprivkey(self, address):
"""
Returns the private key belonging to <address>.
Arguments:
- *address* -- Bitcoin address whose private key should be returned.
"""
return self.proxy.dumpprivkey(address)
def dumpblockchain(self, start_height = 0, end_height = -1):
"""
Dumps the blockchain.
Arguments:
- *start_height* -- Starting block height.
- *end_height* -- Ending block height.
"""
return self.proxy.dumpblockchain(start_height, end_height)
def dumpmempool(self):
"""
Dumps the mempool.
"""
return self.proxy.dumpmempool()
def signmessage(self, address, message):
"""
Sign messages, returns the signature
:param address: Bitcoin address used to sign a message
:type address: str or unicode
:param message: The message to sign
:type message: str or unicode
:rtype: unicode
"""
return self.proxy.signmessage(address, message)
# NOTE: this redefines verifymessage above; being defined later in the
# class body, this version is the one that takes effect.
def verifymessage(self, address, signature, message):
"""
Verify a signed message
:param address: Bitcoin address used to sign a message
:type address: str or unicode
:param signature: The signature
:type signature: unicode
:param message: The message to sign
:type message: str or unicode
:rtype: bool
"""
return self.proxy.verifymessage(address, signature, message)
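# A minimal usage sketch, assuming a reversecoin node listening on
# localhost:8332 with matching RPC credentials (all values are placeholders):
#   conn = BitcoinConnection(user='rpcuser', password='rpcpassword')
#   print(conn.getblockcount())
#   info = conn.getinfo()                    # ServerInfo object with node state
#   address = conn.getnewaddress('savings')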
|
obulpathi/reversecoin
|
reversecoin/bitcoinrpc/connection.py
|
Python
|
gpl-2.0
| 28,122
|
import os
import sys
import transaction
import pysword.books
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
Base,
TTitle,
TPresenter,
TBibleBook,
TChapter,
TEventType,
)
import re
def roman_to_int(n):
n = str(n).upper()
numeral_map = zip((1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I'))
i = result = 0
for integer, numeral in numeral_map:
while n[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
return result
def roman_to_int_repl(match):
return str(roman_to_int(match.group(0)))
roman_regex = re.compile(r'\b(?=[MDCLXVI]+\b)M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})\b')
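# Worked examples of the conversion:
#   roman_to_int("XIV")                              -> 14
#   roman_regex.sub(roman_to_int_repl, "II Samuel")  -> "2 Samuel"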
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
titles = [TTitle(sTitle='Rev'),
TTitle(sTitle='Mr'),
TTitle(sTitle='Mrs'),
TTitle(sTitle='Miss'),
TTitle(sTitle='Ms'),
TTitle(sTitle='Dr'),
TTitle(sTitle='Sir'),]
DBSession.add_all(titles)
presenters = [TPresenter(title=titles[0],
sFirstName='Joel',
sLastName='Overduin',
sSaName='Pastor Joel Overduin'),
TPresenter(title=titles[0],
sFirstName='Carl',
sLastName='Schouls',
sSaName='Pastor Carl A. Schouls'),
TPresenter(title=titles[1],
sFirstName='Russel',
sLastName='Herman',
sSaName='Russel Herman'),
TPresenter(title=titles[1],
sFirstName='Michael',
sLastName='Jaatinen',
sSaName='Michael Jaatinen'),
TPresenter(title=titles[1],
sFirstName='Arie',
sLastName='VanDyk'
),
TPresenter(title=titles[0],
sFirstName='Richard',
sLastName='Miller',
sSaName='Richard J. Miller'),
TPresenter(title=titles[1],
sFirstName='Brad',
sLastName='Pennings'
),
TPresenter(title=titles[0],
sFirstName='David',
sLastName='Lipsy',
sSaName='Pastor David Lipsy'),
TPresenter(title=titles[0],
sFirstName='Pieter',
sLastName='VanderMeyden',
sSaName='Rev. Pieter VanderMeyden'),
TPresenter(title=titles[0],
sFirstName='David',
sLastName='Van Brugge',
sSaName='David Van Brugge'),
TPresenter(title=titles[0],
sFirstName='John',
sLastName='Koopman',
sSaName='John Koopman'),
TPresenter(title=titles[0],
sFirstName='Henry',
sLastName='Van Essen',
sSaName='Pastor Henry VanEssen'),
TPresenter(title=titles[0],
sFirstName='Stanley',
sLastName='McKenzie',
sSaName='Stanley McKenzie'),
TPresenter(title=titles[0],
sFirstName='Henry',
sLastName='Bartsch',
sSaName='Rev. Henry Bartsch'),
TPresenter(title=titles[0],
sFirstName='Timothy',
sLastName='Bergsma',
sSaName='Tim Bergsma'),
TPresenter(title=titles[1],
sFirstName='Gerald',
sLastName='Harke'
),
TPresenter(title=titles[0],
sFirstName='Ian',
sLastName='MacLeod',
sSaName='Ian MacLeod'),
TPresenter(title=titles[0],
sFirstName='C.',
sLastName='Heiberg',
sSaName='Rev. C. Heiberg'),
TPresenter(title=titles[1],
sFirstName='Ken',
sLastName='Pennings',
sSaName='Ken Pennings'),
TPresenter(title=titles[1],
sFirstName='John',
sLastName='Procee',
sSaName='John Procee'),
TPresenter(title=titles[5],
sFirstName='Lawrence',
sLastName='Bilkes',
sSaName='L. W. Bilkes'),
TPresenter(title=titles[0],
sFirstName='Cornelis',
sLastName='Pronk',
sSaName='Rev. Cornelis (Niel) Pronk'),
TPresenter(title=titles[0],
sFirstName='Harold',
sLastName='Zekveld',
sSaName='Harry Zekveld'),
TPresenter(title=titles[1],
sFirstName='Brian',
sLastName='Luth'
),
TPresenter(title=titles[0],
sFirstName='David',
sLastName='Kranendonk',
sSaName='Rev. David Kranendonk'),
TPresenter(title=titles[0],
sFirstName='John',
sLastName='van Eyk',
sSaName='John van Eyk'),
TPresenter(title=titles[0],
sFirstName='Bartel',
sLastName='Elshout',
sSaName='Rev. Bartel Elshout'),
TPresenter(title=titles[0],
sFirstName='Ken',
sLastName='Herfst',
sSaName='Rev. Ken Herfst'),
TPresenter(title=titles[0],
sFirstName='Robert',
sLastName='VanDoodewaard',
sSaName='Rob VanDoodewaard'),
TPresenter(title=titles[0],
sFirstName='Scott',
sLastName='Dibbet',
sSaName='Scott Dibbet'),
TPresenter(title=titles[0],
sFirstName='Eric',
sLastName='Moerdyk',
sSaName='Pastor Eric Moerdyk'),]
DBSession.add_all(presenters)
chapters = []
for testament, books in pysword.books.testaments.items():
iOrder = 1
for b in books:
tbook = TBibleBook(bOldTestament= (testament == 'ot'),
# SermonAudio doesn't like Roman numerals, so replace them with integers
sBook=roman_regex.sub(roman_to_int_repl, b.name),
sAbbrev=b.preferred_abbreviation,
iOrder=iOrder,
iNumChapters=b.num_chapters)
for chap, verses in enumerate(b.chapter_lengths):
chapters.append(TChapter(iChapter=chap + 1,
iMaxVerse=verses,
book=tbook))
iOrder = iOrder + 1
DBSession.add_all(chapters)
event_types = [TEventType(sEventType='Audio Book'),
TEventType(sEventType='Bible Study'),
TEventType(sEventType='Camp Meeting'),
TEventType(sEventType='Chapel Service'),
TEventType(sEventType='Children'),
TEventType(sEventType='Conference'),
TEventType(sEventType='Current Events'),
TEventType(sEventType='Debate'),
TEventType(sEventType='Devotional'),
TEventType(sEventType='Funeral Service'),
TEventType(sEventType='Midweek Service'),
TEventType(sEventType='Podcast'),
TEventType(sEventType='Prayer Meeting'),
TEventType(sEventType='Question & Answer'),
TEventType(sEventType='Radio Broadcast'),
TEventType(sEventType='Special Meeting'),
TEventType(sEventType='Sunday - AM'),
TEventType(sEventType='Sunday - PM'),
TEventType(sEventType='Sunday Afternoon'),
TEventType(sEventType='Sunday School'),
TEventType(sEventType='Sunday Service'),
TEventType(sEventType='Teaching'),
TEventType(sEventType='Testimony'),
TEventType(sEventType='TV Broadcast'),
TEventType(sEventType='Video DVD'),
TEventType(sEventType='Wedding'),
TEventType(sEventType='Youth'),]
DBSession.add_all(event_types)
# TODO loop through books and build TBibleBook objects
# model = MyModel(name='one', value=1)
# DBSession.add(model)
|
tanj/SermonLog
|
sermonlog/scripts/initializedb.py
|
Python
|
gpl-2.0
| 11,323
|
"""Generic functions for generating code."""
import json
import sys
import configen.generator_cpp as cpp
_LANGUAGE_MODULE_DICT = {'c++': cpp}
def write_files(code, language, filename):
generator_module = _LANGUAGE_MODULE_DICT[language]
generator_module.write_files(code, filename)
def convert_json(json_schema, language, **kwargs):
"""Convert json to dict and call actual generator function."""
try:
json_data = json.loads(json_schema)
except Exception as e:
print("Error: failed to parse json")
print(str(e))
sys.exit(1)
return convert_schema_to_language(json_data, language, **kwargs)
def convert_schema_to_language(schema, language, **kwargs):
"""Get generators for particular language, start and end processing."""
generator_module = _LANGUAGE_MODULE_DICT[language]
name_code_dict = {}
for object_name, object_schema in schema.items():
name_code_dict[object_name] = convert_schema(generator_module,
object_schema)
return generator_module.generate_files(name_code_dict, **kwargs)
_SIMPLE_TYPES = ['bool', 'integer', 'number', 'string']
def convert_schema(generator_module, schema):
"""Walk schema tree calling appropriate makers for generating code.
The code and state is stored in a dictionary. Makers is a
dictionary with functions that are called during schema tree
walking.
"""
if 'type' in schema:
if schema['type'] in _SIMPLE_TYPES:
return generator_module.generate_variable(schema)
if schema['type'] == 'object':
members = {
member_name: convert_schema(generator_module, member_schema)
for member_name, member_schema in schema['properties'].items()}
return generator_module.generate_object(members)
if schema['type'] == 'array':
array_element = convert_schema(generator_module, schema['items'])
return generator_module.generate_array(array_element, schema)
if '$ref' in schema:
return generator_module.generate_reference(schema)
# unknown type
return None
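# A minimal sketch of convert_schema in action. The echo generator below is
# hypothetical -- it only mirrors the structure it is handed -- but it exposes
# the same generate_* interface that configen.generator_cpp provides.
if __name__ == '__main__':
    class _EchoGenerator:
        @staticmethod
        def generate_variable(schema):
            return 'var<%s>' % schema['type']
        @staticmethod
        def generate_object(members):
            return {'object': members}
        @staticmethod
        def generate_array(element, schema):
            return ['array', element]
        @staticmethod
        def generate_reference(schema):
            return 'ref<%s>' % schema['$ref']

    demo = {'type': 'object',
            'properties': {'name': {'type': 'string'},
                           'ports': {'type': 'array',
                                     'items': {'type': 'integer'}}}}
    # prints {'object': {'name': 'var<string>', 'ports': ['array', 'var<integer>']}}
    print(convert_schema(_EchoGenerator, demo))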
|
alexey-naydenov/configen
|
configen/generate.py
|
Python
|
gpl-2.0
| 2,185
|
import unittest
import threading
import time
import tempfile
import os
from logmon.textlog import TextLog
file_created = threading.Lock()
log_attached = threading.Lock()
class TestTextLog(unittest.TestCase):
@classmethod
def write_to_log(cls, filename, num_repeats=10):
with open(filename, "w") as f:
file_created.release()
repeat = 0
log_attached.acquire()
while repeat < num_repeats:
f.write("Repeat %s" % (repeat + 1))
f.flush()
time.sleep(1.5)
repeat += 1
log_attached.release()
@classmethod
def read_from_log(cls, log_obj, num_repeats=20):
repeat = 0
ret = ""
while repeat < num_repeats:
ret += log_obj.read()
time.sleep(1)
repeat += 1
return ret
def setUp(self):
self.tmp_filename = tempfile.mktemp()  # mktemp() only returns a name; write_to_log creates the file
self.object = TextLog(self.tmp_filename)
def test_attach(self):
pass
def test_read(self):
log_attached.acquire()
file_created.acquire()
thread = threading.Thread(target=TestTextLog.write_to_log, args=[self.tmp_filename])
thread.start()
file_created.acquire()
self.object.attach()
log_attached.release()
read_log = TestTextLog.read_from_log(self.object)
thread.join()
with open(self.tmp_filename, 'r') as f:
self.assertEqual(f.read().strip(), read_log.strip())
# def tearDown(self):
# os.remove(self.tmp_filename)
|
avkhanov/logmon
|
logmon_test/test_textlog.py
|
Python
|
gpl-2.0
| 1,593
|
# -*- coding: utf-8 -*-
"""
qgiscloudapi
library for accessing the qgiscloud API using Python
Copyright 2011 Sourcepole AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
### basic usage example
# from qgiscloudapi.qgiscloudapi import *
#
# api = API()
# api.set_auth(user='myname', password='secretpassword')
#
# maps = api.read_maps()
"""
# Python versions below 2.6 do not include json, so we fall back to simplejson
try:
import json
except ImportError:
from .. import simplejson as json
import time
from urllib import urlencode
import urllib2
from version import __version__
API_URL = 'https://api.qgiscloud.com'
__all__ = ['API', 'UnauthorizedError', 'ConnectionException', 'TokenRequiredError', 'BadRequestError', 'ForbiddenError',
'ConflictDuplicateError', 'GoneError', 'InternalServerError',
'NotImplementedError', 'ThrottledError']
class API():
"""
The API class contains all methods to access the qgiscloud RESTful
API.
It wraps the HTTP requests to resources in convenient methods and also
takes care of authenticating each request with a token, if needed.
The create_token, check_token, get_token and set_token methods can be
used to work with the token from outside the API class. This might be
useful when it is not intended to ask users for their user and
password for new instances of the API class.
To instantiate API with a predefined token use something like:
# token = json.loads('{"token": "A2wY7qgUNM5eTRM3Lz6D4RZHuGmYPP"}')
# api = API(token=token)
"""
user = None
password = None
_token = None
cache = None
url = None
def __init__(self, token=None, cache=None, url=API_URL):
self.set_token(token)
self.cache = cache
self.url = url
def set_url(self, url):
self.url = url
def api_url(self):
return API_URL
def check_versions(self):
request = Request(cache=self.cache, url=self.url)
content = request.get('/.meta/version.json')
return json.loads(content)
def requires_auth(self):
"""
requires_auth ensures that methods requiring authentication
cannot be called without credentials or a token.
If check_token doesn't return True a TokenRequiredError exception is
raised telling the caller to use the create_token method to get a
valid token.
"""
if not (self.check_auth() or self.check_token()):
raise TokenRequiredError
def set_auth(self, user, password):
"""
Set user/password for authentication.
"""
self.user = user
self.password = password
return True
def reset_auth(self):
"""
Reset user/password for authentication.
"""
self.user = self.password = None
def check_auth(self):
"""
Check whether user/password are set.
"""
if self.user and self.password:
return True
return False
def check_login(self, version_info):
self.requires_auth()
resource = '/notifications.json'
data = {
'type': 'Login',
'info': version_info
}
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.post(resource, data)
login_info = json.loads(content)
if 'clouddb' not in login_info:
login_info['clouddb'] = True
return login_info
def create_token(self, user, password):
"""
Queries the API for a new Token and saves it as self._token.
"""
request = Request(user=user, password=password, cache=self.cache, url=self.url)
content = request.post('/token.json')
self.set_token(json.loads(content))
return True
def check_token(self):
"""
This method checks if there's a token.
"""
token = self.get_token()
if token:
return True
return False
def set_token(self, token):
"""
We use set_token to set the token.
"""
self._token = token
def get_token(self):
"""
We use get_token to get the token.
"""
return self._token
def create_database(self):
"""
Create a database.
"""
self.requires_auth()
resource = '/databases.json'
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
data = {}
content = request.post(resource, data)
return json.loads(content)
def read_databases(self):
"""
Returns a list of databases.
"""
self.requires_auth()
resource = '/databases.json'
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.get(resource)
return json.loads(content)
def delete_database(self, db_name):
"""
Delete a database.
"""
self.requires_auth()
resource = '/databases/%s.json' % (db_name)
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.delete(resource)
return json.loads(content)
def create_table(self, db_name, table, overwrite_table, columns, srid, geometry_type, provider, pkey=None, geom_column=None, geom_column_index=None):
"""
Create a new table
overwrite_table = <bool>, drop table if it exists
columns = [
{
'name': '<NAME>',
'type': '<TYPE>',
'length': <LENGTH>,
'precision': <PRECISION>
}, ...
]
srid = 'EPSG:<SRID>'
geometry_type = 'POINT' | 'MULTIPOINT' | 'LINESTRING' | 'MULTILINESTRING' | 'POLYGON' | 'MULTIPOLYGON'
provider = '<QGIS PROVIDER NAME>'
pkey, geom_column, geom_column_index = primary key and geometry column and index for PostGIS provider
"""
self.requires_auth()
resource = '/databases/%s/tables.json' % (db_name)
data = {
'table': {
'overwrite': overwrite_table,
'name': table,
'columns': [],
'srid': srid,
'geometry_type': geometry_type
},
'provider': provider
}
data['table']['columns'] = columns
if pkey is not None:
data['table']['pkey'] = pkey
if geom_column is not None:
data['table']['geom_column'] = geom_column
if geom_column_index is not None:
data['table']['geom_column_index'] = geom_column_index
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.post(resource, data)
return json.loads(content)
def create_map(self, name, mapfile, config):
"""
Create a new map and return it.
"""
self.requires_auth()
resource = '/maps.json'
file = open(mapfile, "rb")
encoded_file = file.read()
data = {
'map': {
'name' : unicode(name).encode('utf-8'),
'config': config
},
'file': encoded_file
}
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.post(resource, data)
return json.loads(content)
def read_maps(self):
"""
Returns a list of maps.
"""
self.requires_auth()
resource = '/maps.json'
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.get(resource)
return json.loads(content)
def read_map(self, map_name):
"""
Returns all map details.
"""
#self.requires_auth()
resource = '/maps/%s.json' % (map_name)
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.get(resource)
return json.loads(content)
def delete_map(self, map_name):
"""
Delete a map.
"""
self.requires_auth()
resource = '/maps/%s.json' % (map_name)
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
request.delete(resource)
return True
def create_graphic(self, name, symbol):
"""
Upload a symbol.
"""
self.requires_auth()
resource = '/graphics.json'
file = open(symbol, "rb")
encoded_file = file.read()
data = {
'graphic': {
'name' : unicode(name).encode('utf-8'),
},
'file': encoded_file
}
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.post(resource, data)
return json.loads(content)
def create_exception(self, exception, version_info, project_fname):
"""
Upload a plugin exception.
"""
self.requires_auth()
resource = '/notifications.json'
encoded_file = ''
try:
file = open(project_fname, 'rb')
encoded_file = file.read()
except:
pass
try:
exception_info = exception + str(version_info) + encoded_file
except:
exception_info = 'No exception info (message has encoding problems)' + str(version_info)
data = {
'type': 'ClientException',
'info': exception_info
}
request = Request(user=self.user, password=self.password, token=self.get_token(), cache=self.cache, url=self.url)
content = request.post(resource, data)
return json.loads(content)
###
#
# EXCEPTIONS
#
###
class ConnectionException(Exception):
"""
We raise this exception if the API was unreachable.
"""
pass
class TokenRequiredError(Exception):
"""
We raise this exception if a method requires a token but self._token
is none.
Use the create_token() method to get a new token.
"""
def __unicode__(self):
return 'No valid token. Use create_token(user, password) to get a new one'
class BadRequestError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 400
BAD REQUEST.
"""
#msgs = {}
msgs = []
def __init__(self, value):
try:
self.msgs = json.loads(value) #json.loads(value[12:])
except ValueError:
self.msgs = [] #{}
def __str__(self):
#msg = ''
#for key in self.msgs:
# msg = msg + key + ': ' + self.msgs[key] + '\n'
msg = '\n'.join(self.msgs)
return msg
class UnauthorizedError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 401
UNAUTHORIZED.
"""
pass
class ForbiddenError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 403
FORBIDDEN.
"""
pass
class NotFoundError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 404
NOT FOUND.
"""
pass
class ConflictDuplicateError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 409
DUPLICATE ENTRY.
"""
pass
class GoneError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 410
GONE.
"""
pass
class InternalServerError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 500
INTERNAL SERVER ERROR.
"""
pass
class NotImplementedError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 501
NOT IMPLEMENTED.
"""
pass
class ThrottledError(Exception):
"""
We raise this exception whenever the API answers with HTTP STATUS 503
THROTTLED.
"""
pass
###
#
# Custom HTTPBasicAuthHandler with fix for infinite retries when submitting wrong password
# http://bugs.python.org/issue8797
# http://bugs.python.org/file20471/simpler_fix.patch
#
###
class HTTPBasicAuthHandlerLimitRetries(urllib2.HTTPBasicAuthHandler):
def __init__(self, *args, **kwargs):
urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
def http_error_auth_reqed(self, authreq, host, req, headers):
authreq = headers.get(authreq, None)
if authreq:
mo = urllib2.AbstractBasicAuthHandler.rx.search(authreq)
if mo:
if len(mo.groups()) == 3:
scheme, quote, realm = mo.groups()
else:
scheme, realm = mo.groups()
if scheme.lower() == 'basic':
return self.retry_http_basic_auth(host, req, realm)
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = ("%s:%s" % (user, pw)).encode('utf8')
auth = 'Basic %s' % urllib2.base64.b64encode(raw).strip()
if req.get_header(self.auth_header, None) == auth:
return None
req.add_unredirected_header(self.auth_header, auth)
#return self.parent.open(req, timeout=req.timeout)
return self.parent.open(req)
###
#
# Request Class using urllib2 to fire HTTP requests
#
###
class Request():
"""
Request is used internally to actually fire API requests. It has some
handy shortcut methods for POST, GET, PUT and DELETE, sets correct
headers for each method, takes care of encoding data and handles all API
errors by throwing exceptions.
"""
user = None
password = None
token = None
version = None
url = None
def __init__(self, user=None, password=None, token=None, cache=None, version=__version__, url=API_URL):
self.user = user
self.password = password
self.token = token
self.version = version
self.cache = cache # FIXME: no caching in urllib2?
self.url = url
def post(self, resource, data={}):
return self.request(resource, method='POST', data=data)
def get(self, resource):
return self.request(resource, method='GET')
def put(self, resource, data={}):
return self.request(resource, method='PUT', data=data)
def delete(self, resource):
return self.request(resource, method='DELETE')
def request(self, resource, method='GET', data=None, headers={}):
"""
use urllib2
"""
url = self.url + resource
#
# If the current API instance has a valid token we add the Authorization
# header with the correct token.
#
# In case we do not have a valid token but user and password are
# provided we automatically use them to add an HTTP Basic Authentication
# header to the request to create a new token.
#
if self.token is not None:
headers['Authorization'] = 'auth_token="%s"' % (self.token['token'])
elif self.user is not None and self.password is not None:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, self.url, self.user, self.password)
auth_handler = HTTPBasicAuthHandlerLimitRetries(password_manager)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
#
# The API expects the body to be urlencoded. If data was passed to
# the request method we therefore use urlencode from urllib.
#
if data is None:
body = ''
else:
body = urlencode(data)
#
# We set the Host Header for MacOSX 10.5, to circumvent the NotFoundError
#
#headers['Host'] = 'api.qgiscloud.com'
#
# We set the User-Agent Header to qgiscloudapi and the local version.
# This enables basic statistics about still used qgiscloudapi versions in
# the wild.
#
headers['User-Agent'] = 'qgiscloudapi/%s' % (self.version)
#
# The API expects PUT or POST data to be x-www-form-urlencoded so we
# also set the correct Content-Type header.
#
if method.upper() in ['PUT', 'POST']:
headers['Content-Type']='application/x-www-form-urlencoded'
#headers['Content-Type']='multipart/form-data'
#
# We also set the Content-Length and Accept-Encoding headers.
#
headers['Content-Length'] = str(len(body))
headers['Accept-Encoding'] = 'compress, gzip'
#
# Finally we fire the actual request.
#
for i in range(1, 6):
try:
request_method = method.upper()
if request_method in ['PUT', 'POST']:
req = urllib2.Request(url=url, data=body, headers=headers)
else:
req = urllib2.Request(url=url, headers=headers)
if request_method in ['PUT', 'DELETE']:
# add PUT and DELETE methods
req.get_method = lambda: request_method
response = urllib2.urlopen(req).read()
except urllib2.HTTPError, e:
#
# Handle the possible responses according to their HTTP STATUS
# CODES.
#
# All non success STATUS CODES raise an exception containing
# the API error message.
#
msg = e.read().decode('UTF8', errors='ignore')
if e.code in [201, 202, 203, 204]: # Workaround for old Pythons
return msg
elif e.code == 400:
raise BadRequestError(msg)
elif e.code == 401:
raise UnauthorizedError(msg)
elif e.code == 403:
raise ForbiddenError(msg)
elif e.code == 404:
raise NotFoundError()
elif e.code == 409:
raise ConflictDuplicateError(msg)
elif e.code == 410:
raise GoneError(msg)
elif e.code == 422: # Unprocessable Entity
raise BadRequestError(msg)
#
# 500 INTERNAL SERVER ERRORs normally shouldn't happen...
#
elif e.code == 500:
raise InternalServerError(msg)
elif e.code == 501:
raise NotImplementedError(msg)
elif e.code == 503:
raise ThrottledError(msg)
except urllib2.URLError, e:
# if we could not reach the API we wait 1s and try again
time.sleep(1)
# if we tried for the fifth time we give up - and cry a little
if i == 5:
raise ConnectionException('Could not connect to API...')
else:
#
# 200 OK, 201 CREATED and 204 DELETED result in returning the actual
# response.
#
return response.decode('UTF8')
|
manisandro/qgis-cloud-plugin
|
qgiscloud/qgiscloudapi/qgiscloudapi.py
|
Python
|
gpl-2.0
| 20,543
|
# Copyright (C) 2014 SocialCookies @IV/GII
# @anaprados @oskyar @torresj @josemlp91
# @franciscomanuel @rogegg @pedroag @melero90
# Web application for managing cookie orders,
# with photos from Instagram and Twitter.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'webcookies.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'socialcookies/', include('socialcookies.urls')),
#url(r'^', 'socialcookies.views.hola'),
#url(r'^hola/', 'socialcookies.views.hola'),
)
|
IV-GII/SocialCookies
|
ENV1/webcookies/webcookies/urls.py
|
Python
|
gpl-2.0
| 1,375
|
#!/usr/bin/env python
"""
This will show Russian text in koi8-r encoding.
"""
from xml.parsers import expat
import string
# Produces ImportError in 1.5, since this test can't possibly pass there
import codecs
class XMLTree:
def __init__(self):
pass
# Define a handler for start element events
def StartElement(self, name, attrs ):
#name = name.encode()
print "<", repr(name), ">"
print "attr name:", attrs.get("name",unicode("")).encode("koi8-r")
print "attr value:", attrs.get("value",unicode("")).encode("koi8-r")
def EndElement(self, name ):
print "</", repr(name), ">"
def CharacterData(self, data ):
if string.strip(data):
data = data.encode("koi8-r")
print data
def LoadTree(self, filename):
# Create a parser
Parser = expat.ParserCreate()
# Tell the parser what the start element handler is
Parser.StartElementHandler = self.StartElement
Parser.EndElementHandler = self.EndElement
Parser.CharacterDataHandler = self.CharacterData
# Parse the XML File
ParserStatus = Parser.Parse(open(filename,'r').read(), 1)
def runTest():
win = XMLTree()
win.LoadTree("enc_test.xml")
return win
runTest()
|
Pikecillo/genna
|
external/PyXML-0.8.4/test/test_encodings.py
|
Python
|
gpl-2.0
| 1,292
|
#!/usr/bin/env python
import sys
sys.path.insert(0,'/home/william/caffe-master/python')
import os
import numpy as np
import matplotlib.pyplot as plt
import caffe
'''
A wrapper for calling the deep net to get features for a region.
Two ways of calling are provided:
One: provide a folder where the images are stored
Two: provide a single image to extract features from
'''
class NetWrapper(object):
def __init__(self,cpu_only,model,proto):
'''
Initialize the net parameters:
cpu_only: True to use the CPU, False to use the GPU
model: the trained model weights
proto: the prototxt defining the net structure used for testing
'''
if cpu_only:
caffe.set_mode_cpu()
else:
caffe.set_device(0)
caffe.set_mode_gpu()
self.net = caffe.Net(proto,model,caffe.TEST)
def preprocess(self,
transpose=(2,0,1),
scale=255,  # caffe.io.load_image returns [0,1]; rescale to the 0-255 range the model expects
mean=np.array([104,117,123]),
channel_swap=(2,1,0)
):
'''
Preprocess the image before feeding it to the net model.
This is essential for the second usage: providing a single image.
'''
self.transformer = caffe.io.Transformer(
{'data':self.net.blobs['data'].shape}
)
self.transformer.set_transpose('data',transpose)
self.transformer.set_mean('data',mean)
self.transformer.set_raw_scale('data',scale)
self.transformer.set_channel_swap('data',channel_swap)
def getfeature(
self,
image=None,
aux_file=None,
out_layer=None,
feature_layer=None,
feature_path=None
):
'''
get the feature
input:
if image is the path of a folder, features are extracted for every
picture listed in aux_file (the test database)
if image is the path of a single file (and aux_file is " "),
features are extracted for that image only
the data of out_layer contains the predicted result
the data of feature_layer contains the feature result
aux_file contains the labels of the input data
feature_path is the folder where the result file is saved
return:
the result is stored in a file named 'feature' under feature_path
'''
# reshape the net input layer
shape=self.net.blobs['data'].data.shape
channel=shape[1]
width=shape[2]
height=shape[3]
self.net.blobs['data'].reshape(
1,
channel,
width,
height
)
if aux_file!=" ":
try:
labels = open(aux_file,'r')
except:
sys.stderr.write("load label file error! "+aux_file+'\n')
return -1
try:
res = open(feature_path+'feature','w+')
except:
sys.stderr.write("load feature file error! "+featuere_path+'\n')
return -1
if not os.path.isdir(image):
sys.stderr.write("image should be path of folder for storing picture!\n")
return -1
cur_batch = 0
for line in labels:
line_sp = line.rstrip('\n').split('\t')
label = line_sp[1]
img_path = image+line_sp[0]
self.net.blobs['data'].data[...]=\
self.transformer.preprocess(
'data',
caffe.io.load_image(img_path)
)
out = self.net.forward()
#get the label and the feature
feat = self.net.blobs[feature_layer].data[0]
s=''
for item in feat:
s += str(item)+' '
s += label+'\n'
res.write(s)
cur_batch += 1
return cur_batch
else:
if not os.path.isfile(image):
sys.stderr.write(image+" is not a valiuable file name!\n")
return -1
self.net.blobs['data'].data[...]=\
self.transformer.preprocess(
'data',
caffe.io.load_image(image)
)
out = self.net.forward()
#just test
#print feature
feat = self.net.blobs[feature_layer].data[0]
print feat
#print predicted result
print("Predicted class is #{}."\
.format(out[out_layer][0].argmax()))
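# A minimal usage sketch (paths and layer names are hypothetical placeholders):
#   net = NetWrapper(cpu_only=True,
#                    model='models/caffenet.caffemodel',
#                    proto='models/deploy.prototxt')
#   net.preprocess()
#   # one image: print the feature vector and the predicted class
#   net.getfeature(image='cat.jpg', aux_file=" ",
#                  out_layer='prob', feature_layer='fc7')
#   # a folder of images listed in labels.txt: write features to out/feature
#   net.getfeature(image='images/', aux_file='labels.txt',
#                  feature_layer='fc7', feature_path='out/')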
|
AIML/Sematic-Image-Search
|
src/feature/get_feature.py
|
Python
|
gpl-2.0
| 4,785
|
#!/bin/python3
import csv
import glob
from konfiguracja import *
from hyphenate import hyphenate_word
import operator
import re
import pathlib # to check whether a file exists
def Wczytaj_ksiazke1(ksiazka):
global d_wyrazy
c_ksiazka=open(ksiazka,"r")
str_ksiazka=c_ksiazka.read()
for wyraz in re.split('[\s\n\+,\.\-?!()\[\]:;/`"„”…]+', str_ksiazka.lower()):
if wyraz in d_wyrazy: # dict.has_key() was removed in Python 3
d_wyrazy[wyraz]+=1
else:
d_wyrazy[wyraz]=1
def Wczytaj_ksiazke2(ksiazka):
global d_wyrazy
c_ksiazka=open(ksiazka,"r")
str_ksiazka=c_ksiazka.read()
for wyraz in re.split('[\s\n\+,\.\-?!()\[\]:;/`"„”…]+', str_ksiazka.lower()):
try:
d_wyrazy[wyraz]+=1
except KeyError:
d_wyrazy[wyraz]=1
def PodzielNaSylaby(wyraz, krotnosc):
sylaby=hyphenate_word(wyraz)
for sylaba in sylaby:
try:
d_sylaby[sylaba]+=krotnosc
except KeyError:
d_sylaby[sylaba]=krotnosc
def WczytajSylaby(sciezkaSylab):
baza=open(sciezkaSylab,'r') # stub: loading the syllable base is not implemented yet
def WczytajLubZrobBazeSylab(sciezkaDoBazy, katalogZKsiazkami):
global d_wyrazy, d_sylaby # Wczytaj_ksiazke2 and PodzielNaSylaby mutate these as module-level names
p = pathlib.Path(sciezkaDoBazy)
if not p.exists():
d_ksiazki=glob.glob(katalogZKsiazkami+"/*")
d_wyrazy={}
for ksiazka in d_ksiazki:
Wczytaj_ksiazke2(ksiazka)
d_sylaby={}
for wyraz,krotnosc in d_wyrazy.items():
PodzielNaSylaby(wyraz, krotnosc)
sorted_sylaby = sorted(d_sylaby.items(), key=operator.itemgetter(1),reverse=True)
c_bazaSylab=open(sciezkaDoBazy,"w")
#pickle.dump(obj=d_sylaby, file=c_bazaSylab)
wr=csv.writer(c_bazaSylab)
for el in sorted_sylaby:
wr.writerow(el)
return (sorted_sylaby)
else:
c_bazaSylab=open(sciezkaDoBazy,"r")
rd=csv.reader(c_bazaSylab)
sorted_sylaby=list()
for el in rd:
sorted_sylaby.append(el)
return (sorted_sylaby)
################################################################################################################################################
def main():
global d_wyrazy, d_sylaby # shared with Wczytaj_ksiazke2 and PodzielNaSylaby
d_ksiazki=glob.glob(slownik_dir+"/*")
d_wyrazy={}
for ksiazka in d_ksiazki:
Wczytaj_ksiazke2(ksiazka)
d_sylaby={}
for wyraz,krotnosc in d_wyrazy.items():
PodzielNaSylaby(wyraz, krotnosc)
sorted_sylaby = sorted(d_sylaby.items(), key=operator.itemgetter(1),reverse=True)
c_bazaSylab=open(path_bazaSylab,"w")
#pickle.dump(obj=d_sylaby, file=c_bazaSylab)
wr=csv.writer(c_bazaSylab)
for el in sorted_sylaby:
wr.writerow(el)
if __name__ == '__main__':
main()
# path_SlownikSylab
# slownik_dir="/home/Adama-docs/Adam/Adam/MyDocs/praca/Python/ksiazki"
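# Usage sketch (paths are hypothetical):
#   sorted_sylaby = WczytajLubZrobBazeSylab('sylaby.csv', 'ksiazki')
#   print(sorted_sylaby[:10]) # the ten most frequent syllables with counts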
|
adamryczkowski/powtarzane-cwiczenia
|
import_books.py
|
Python
|
gpl-2.0
| 2,890
|
#!/usr/bin/python2
#
# term-war
#
# Copyright (c) 2013
#
# Author Branislav Blaskovic <branislav@blaskovic.sk>
#
import sys
from decorations import Colors
Color = Colors()
class Writer:
def __init__(self):
pass
def out(self, text, color=Color.ENDC, new_line=True):
# Write some message to terminal in chosen color
sys.stdout.write(color + text + Color.ENDC)
if new_line:
sys.stdout.write("\n")
writer = Writer()
class Prompt:
def __init__(self):
pass
def ask(self, text, color=Color.ENDC):
writer.out(text, color)
return raw_input("> ")
|
blaskovic/term-war
|
src/interaction.py
|
Python
|
gpl-2.0
| 631
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.common.vm import VM
from cfme.control.explorer import PolicyProfile, VMCompliancePolicy, Action, VMControlPolicy
from cfme.infrastructure.virtual_machines import Vm
from utils.log import logger
from utils.providers import setup_a_provider as _setup_a_provider
from utils.wait import wait_for
@pytest.fixture(scope="module")
def setup_a_provider():
return _setup_a_provider("infra")
@pytest.fixture(scope="module")
def vmware_provider():
return _setup_a_provider("infra", "virtualcenter")
@pytest.fixture(scope="module")
def vmware_vm(request, vmware_provider):
vm = VM.factory("test_control_{}".format(fauxfactory.gen_alpha().lower()), vmware_provider)
vm.create_on_provider(find_in_cfme=True)
request.addfinalizer(vm.delete_from_provider)
return vm
@pytest.mark.meta(blockers=[1155284])
@pytest.mark.ignore_stream("5.2")
def test_scope_windows_registry_stuck(request, setup_a_provider):
"""If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
policy = VMCompliancePolicy(
"Windows registry scope glitch testing Compliance Policy",
active=True,
scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
r"some value, INCLUDES, some content)"
)
request.addfinalizer(lambda: policy.delete() if policy.exists else None)
policy.create()
profile = PolicyProfile(
"Windows registry scope glitch testing Compliance Policy",
policies=[policy]
)
request.addfinalizer(lambda: profile.delete() if profile.exists else None)
profile.create()
# Now assign this malformed profile to a VM
vm = VM.factory(Vm.get_first_vm_title(provider=setup_a_provider), setup_a_provider)
vm.assign_policy_profiles(profile.description)
# It should be screwed here, but do an additional check
pytest.sel.force_navigate("dashboard")
pytest.sel.force_navigate("infrastructure_virtual_machines")
assert "except" not in pytest.sel.title().lower()
vm.unassign_policy_profiles(profile.description)
@pytest.mark.meta(blockers=[1209538], automates=[1209538])
def test_folder_field_scope(request, vmware_provider, vmware_vm):
"""This test tests the bug that makes the folder filter in expression not work.
Prerequisites:
* A VMware provider.
* A VM on the provider.
* A tag to assign.
Steps:
* Read the VM's 'Parent Folder Path (VMs & Templates)' from its summary page.
* Create an action for assigning the tag to the VM.
* Create a policy, for scope use ``Field``, field name
``VM and Instance : Parent Folder Path (VMs & Templates)``, ``INCLUDES`` and the
folder name as stated on the VM's summary page.
* Assign the ``VM Discovery`` event to the policy.
* Assign the action to the ``VM Discovery`` event.
* Create a policy profile and assign the policy to it.
* Assign the policy profile to the provider.
* Delete the VM from the CFME database.
* Initiate provider refresh and wait for VM to appear again.
* Assert that the VM gets tagged by the tag.
"""
# Retrieve folder location
folder = None
tags = vmware_vm.get_tags()
for tag in tags:
if "Parent Folder Path (VMs & Templates)" in tag:
folder = tag.split(":", 1)[-1].strip()
logger.info("Detected folder: {}".format(folder))
break
else:
pytest.fail("Could not read the folder from the tags:\n{}".format(repr(tags)))
# Create Control stuff
action = Action(
fauxfactory.gen_alpha(),
"Tag", dict(tag=("My Company Tags", "Service Level", "Platinum")))
action.create()
request.addfinalizer(action.delete)
policy = VMControlPolicy(
fauxfactory.gen_alpha(),
scope=(
"fill_field(VM and Instance : Parent Folder Path (VMs & Templates), "
"INCLUDES, {})".format(folder)))
policy.create()
request.addfinalizer(policy.delete)
policy.assign_events("VM Discovery")
request.addfinalizer(policy.assign_events) # Unassigns
policy.assign_actions_to_event("VM Discovery", action)
profile = PolicyProfile(fauxfactory.gen_alpha(), policies=[policy])
profile.create()
request.addfinalizer(profile.delete)
# Assign policy profile to the provider
vmware_provider.assign_policy_profiles(profile.description)
request.addfinalizer(lambda: vmware_provider.unassign_policy_profiles(profile.description))
# Delete and rediscover the VM
vmware_vm.delete()
vmware_vm.wait_for_delete()
vmware_provider.refresh_provider_relationships()
vmware_vm.wait_to_appear()
# Wait for the tag to appear
wait_for(
vmware_vm.get_tags, num_sec=600, delay=15,
fail_condition=lambda tags: "Service Level: Platinum" not in tags, message="vm to be tagged")
@pytest.mark.meta(blockers=[1243357], automates=[1243357])
def test_invoke_custom_automation(request):
"""This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear.
Steps:
* Go create new action, select Invoke Custom Automation
* The form with additional fields should appear
"""
# The action is to have all possible fields filled, that way we can ensure it is good
action = Action(
fauxfactory.gen_alpha(),
"Invoke a Custom Automation",
dict(
message=fauxfactory.gen_alpha(),
request=fauxfactory.gen_alpha(),
attribute_1=fauxfactory.gen_alpha(),
value_1=fauxfactory.gen_alpha(),
attribute_2=fauxfactory.gen_alpha(),
value_2=fauxfactory.gen_alpha(),
attribute_3=fauxfactory.gen_alpha(),
value_3=fauxfactory.gen_alpha(),
attribute_4=fauxfactory.gen_alpha(),
value_4=fauxfactory.gen_alpha(),
attribute_5=fauxfactory.gen_alpha(),
value_5=fauxfactory.gen_alpha(),))
@request.addfinalizer
def _delete_action():
if action.exists:
action.delete()
action.create()
|
thom-at-redhat/cfme_tests
|
cfme/tests/control/test_bugs.py
|
Python
|
gpl-2.0
| 6,198
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebAccess Admin Flask Blueprint."""
from __future__ import unicode_literals
from flask import Blueprint, redirect, url_for
from flask_breadcrumbs import register_breadcrumb
from flask_login import login_required
from invenio_base.decorators import sorted_by, templated
from invenio_base.i18n import _
from invenio_ext.principal import permission_required
from invenio_access.models import AccACTION, AccROLE
from invenio_accounts.models import User
# from invenio_access.local_config import \
# FIXME
WEBACCESSACTION = 'cfgwebaccess'
blueprint = Blueprint('webaccess_admin', __name__,
url_prefix="/admin/webaccess",
template_folder='../templates',
static_folder='../static')
@blueprint.route('/', methods=['GET', 'POST'])
@login_required
@permission_required(WEBACCESSACTION)
@templated('access/admin/index.html')
@register_breadcrumb(blueprint, 'admin.webaccess_admin', _('WebAccess'))
def index():
"""Index."""
actions = [
dict(url=url_for('.rolearea'),
title=_('Role Area'),
description=_('Main area to configure administration rights '
'and authorization rules.')),
dict(url=url_for('.actionarea'),
title=_('Action Area'),
description=_('Configure administration rights with the '
'actions as starting point.')),
dict(url=url_for('.userarea'),
title=_('User Area'),
description=_('Configure administration rights with the '
'users as starting point.')),
dict(url=url_for('.resetarea'),
title=_('Reset Area'),
description=_('Reset roles, actions and authorizations.')),
dict(url=url_for('.manageaccounts'),
title=_('Manage Accounts Area'),
description=_('Manage user accounts.')),
dict(url=url_for('.delegate_startarea'),
title=_('Delegate Rights - With Restrictions'),
description=_('Delegate your rights for some roles.')),
]
return dict(actions=actions)
@blueprint.route('/actionarea', methods=['GET', 'POST'])
@login_required
@permission_required(WEBACCESSACTION)
@sorted_by(AccACTION)
@templated('access/admin/actionarea.html')
def actionarea(sort=False, filter=None):
"""Action area."""
if sort is False:
sort = AccACTION.name
actions = AccACTION.query.order_by(sort).filter(filter).all()
return dict(actions=actions)
@blueprint.route('/rolearea', methods=['GET', 'POST'])
@login_required
@permission_required(WEBACCESSACTION)
@sorted_by(AccROLE)
@templated('access/admin/rolearea.html')
def rolearea(sort=False, filter=None):
"""Role area."""
if sort is False:
sort = AccROLE.name
roles = AccROLE.query.order_by(sort).filter(filter).all()
return dict(roles=roles)
@blueprint.route('/showroledetails/<int:id_role>', methods=['GET', 'POST'])
@login_required
@permission_required(WEBACCESSACTION)
@templated('access/admin/showroledetails.html')
def showroledetails(id_role):
"""Show role details."""
return dict(role=AccROLE.query.get_or_404(id_role))
@blueprint.route('/userarea', methods=['GET', 'POST'])
@login_required
@permission_required(WEBACCESSACTION)
@sorted_by(User)
@templated('access/admin/userarea.html')
def userarea(sort=False, filter=None):
"""User area."""
if sort is False:
sort = User.nickname
users = User.query.order_by(sort).filter(filter).all()
return dict(users=users)
@blueprint.route('/resetarea', methods=['GET', 'POST'])
def resetarea():
"""Reset area."""
# FIXME reimplement this function
return redirect('/admin/webaccess/webaccessadmin.py/resetarea')
@blueprint.route('/manageaccounts', methods=['GET', 'POST'])
def manageaccounts():
"""Manage accounts."""
# FIXME reimplement this function
return redirect('/admin/webaccess/webaccessadmin.py/manageaccounts')
@blueprint.route('/delegate_startarea', methods=['GET', 'POST'])
def delegate_startarea():
"""Delegate start area."""
# FIXME reimplement this function
return redirect('/admin/webaccess/webaccessadmin.py/delegate_startarea')
|
nharraud/invenio-access
|
invenio_access/views/admin.py
|
Python
|
gpl-2.0
| 5,025
|
# -*- coding: utf-8 -*-
from itertools import groupby
try:
from django.db import IntegrityError
except:
pass
from django.contrib.auth.decorators import user_passes_test
from django.contrib.contenttypes.models import ContentType
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.simplejson import dumps as json_dumps
from tcms.core.logs.models import TCMSLogModel
from tcms.core.utils import QuerySetIterationProxy
from tcms.management.models import TCMSEnvGroup
from tcms.management.models import TCMSEnvGroupPropertyMap
from tcms.management.models import TCMSEnvProperty
from tcms.management.models import TCMSEnvValue
MODULE_NAME = "management"
def environment_groups(request, template_name='environment/groups.html'):
"""
Environments list
"""
env_groups = TCMSEnvGroup.objects
# Initialize the response to the browser
ajax_response = {'rc': 0, 'response': 'ok'}
has_perm = request.user.has_perm
user_action = request.REQUEST.get('action')
# Add action
if user_action == 'add':
if not has_perm('management.add_tcmsenvgroup'):
ajax_response['response'] = 'Permission denied.'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
group_name = request.REQUEST.get('name')
# Get the environment group name from the javascript request
if not group_name:
ajax_response['response'] = 'Environment group name is required.'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
try:
env = env_groups.create(name=group_name,
manager_id=request.user.id,
modified_by_id=None)
env.log_action(who=request.user,
action='Initial env group %s' % env.name)
ajax_response['id'] = env.id
return HttpResponse(json_dumps(ajax_response))
except IntegrityError, error:
if error[1].startswith('Duplicate'):
response_msg = 'Environment group named \'%s\' already ' \
'exists, please select another name.' % group_name
ajax_response['response'] = response_msg
else:
ajax_response['response'] = error[1]
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
except:
ajax_response['response'] = 'Unknown error.'
return HttpResponse(json_dumps(ajax_response))
# Del action
if user_action == 'del':
if request.REQUEST.get('id'):
try:
env = env_groups.get(id=request.REQUEST['id'])
manager_id = env.manager_id
if request.user.id != manager_id:
if not has_perm('management.delete_tcmsenvgroup'):
ajax_response['response'] = 'Permission denied.'
return HttpResponse(json_dumps(ajax_response))
env.delete()
ajax_response['response'] = 'ok'
except TCMSEnvGroup.DoesNotExist, error:
raise Http404(error)
            try:
                # Remove the property mappings that belonged to the deleted group
                env_group_property_map = TCMSEnvGroupPropertyMap.objects.filter(
                    group__id=request.REQUEST['id']
                )
                env_group_property_map.delete()
            except:
                pass
return HttpResponse(json_dumps(ajax_response))
else:
pass
if not has_perm('management.delete_tcmsenvgroup'):
ajax_response['response'] = 'Permission denied.'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
# Modify actions
if user_action == 'modify':
if not has_perm('management.change_tcmsenvgroup'):
ajax_response['response'] = 'Permission denied.'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
try:
env = env_groups.get(id=request.REQUEST['id'])
if request.REQUEST.get('status') in ['0', '1']:
env.is_active = int(request.REQUEST['status'])
action = 'Change env group status to %s' % env.is_active
env.log_action(who=request.user, action=action)
else:
ajax_response['response'] = 'Argument illegal.'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
env.save()
except TCMSEnvGroup.DoesNotExist, error:
raise Http404(error)
# Search actions
if user_action == 'search':
if request.REQUEST.get('name'):
env_groups = env_groups.filter(
name__icontains=request.REQUEST['name']
)
else:
env_groups = env_groups.all()
else:
env_groups = env_groups.all().order_by('is_active')
# Get properties for each group
qs = TCMSEnvGroupPropertyMap.objects.filter(group__in=env_groups)
qs = qs.values('group__pk', 'property__name')
qs = qs.order_by('group__pk', 'property__name').iterator()
properties = dict([(key, list(value)) for key, value in
groupby(qs, lambda item: item['group__pk'])])
# Get logs for each group
env_group_ct = ContentType.objects.get_for_model(TCMSEnvGroup)
qs = TCMSLogModel.objects.filter(content_type=env_group_ct,
object_pk__in=env_groups)
qs = qs.values('object_pk', 'who__username', 'date', 'action')
qs = qs.order_by('object_pk').iterator()
# we have to convert object_pk to an integer because it is stored as a
# string in the database.
logs = dict([(int(key), list(value)) for key, value in
groupby(qs, lambda log: log['object_pk'])])
env_groups = env_groups.select_related('modified_by', 'manager').iterator()
env_groups = QuerySetIterationProxy(env_groups,
properties=properties,
another_logs=logs)
context_data = {
'environments': env_groups,
'module': 'env',
}
return render_to_response(template_name, context_data,
context_instance=RequestContext(request))
@user_passes_test(lambda u: u.has_perm('management.change_tcmsenvgroup'))
def environment_group_edit(request,
template_name='environment/group_edit.html'):
"""
Assign properties to environment group
"""
# Initialize the response
response = ''
environment_id = request.REQUEST.get('id', None)
if environment_id is None:
raise Http404
try:
environment = TCMSEnvGroup.objects.get(pk=environment_id)
except TCMSEnvGroup.DoesNotExist:
raise Http404
try:
de = TCMSEnvGroup.objects.get(name=request.REQUEST.get('name'))
if environment != de:
response = 'Duplicated name already exists, please change to ' \
'another name.'
context_data = {
'environment': environment,
'properties': TCMSEnvProperty.get_active(),
'selected_properties': environment.property.all(),
'message': response,
}
return render_to_response(template_name, context_data,
context_instance=RequestContext(request))
except TCMSEnvGroup.DoesNotExist:
pass
    if request.REQUEST.get('action') == 'modify':  # Actions of modify
        environment_name = request.REQUEST['name']
        if environment.name != environment_name:
            old_name = environment.name  # keep the old name for the log entry
            environment.name = environment_name
            environment.log_action(
                who=request.user,
                action='Modify name from %s to %s' % (old_name,
                                                      environment_name))
if environment.is_active != request.REQUEST.get('enabled', False):
environment.is_active = request.REQUEST.get('enabled', False)
environment.log_action(
who=request.user,
action='Change env group status to %s' % environment.is_active)
environment.modified_by_id = request.user.id
environment.save()
# Remove all of properties of the group.
TCMSEnvGroupPropertyMap.objects.filter(
group__id=environment.id).delete()
# Re-add the properties to the environment group and log the action
for property_id in request.REQUEST.getlist('selected_property_ids'):
TCMSEnvGroupPropertyMap.objects.create(group_id=environment.id,
property_id=property_id)
property_values = environment.property.values_list('name', flat=True)
environment.log_action(
who=request.user,
action='Properties changed to %s' % (', '.join(property_values)))
response = 'Environment group saved successfully.'
context_data = {
'environment': environment,
'properties': TCMSEnvProperty.get_active(),
'selected_properties': environment.property.all(),
'message': response,
}
return render_to_response(template_name, context_data,
context_instance=RequestContext(request))
def environment_properties(request, template_name='environment/property.html'):
"""
Edit environment properties and the values belonging to them
"""
# Initialize the ajax response
ajax_response = {'rc': 0, 'response': 'ok'}
message = ''
has_perm = request.user.has_perm
user_action = request.REQUEST.get('action')
# Actions of creating properties
if user_action == 'add':
if not has_perm('management.add_tcmsenvproperty'):
ajax_response['response'] = 'Permission denied'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
property_name = request.REQUEST.get('name')
if not property_name:
ajax_response['response'] = 'Property name is required'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
try:
new_property = TCMSEnvProperty.objects.create(name=property_name)
ajax_response['id'] = new_property.id
ajax_response['name'] = new_property.name
except IntegrityError, error:
if error[1].startswith('Duplicate'):
                resp_msg = 'Environment property named \'%s\' already ' \
                           'exists, please select another name.' % property_name
else:
resp_msg = error[1]
ajax_response['rc'] = 1
ajax_response['response'] = resp_msg
return HttpResponse(json_dumps(ajax_response))
return HttpResponse(json_dumps(ajax_response))
# Actions of editing an existing property
if user_action == 'edit':
if not has_perm('management.change_tcmsenvproperty'):
ajax_response['response'] = 'Permission denied'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
if not request.REQUEST.get('id'):
ajax_response['response'] = 'ID is required'
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
try:
env_property = TCMSEnvProperty.objects.get(
id=request.REQUEST['id'])
env_property.name = request.REQUEST.get('name', env_property.name)
try:
env_property.save()
except IntegrityError, error:
ajax_response['response'] = error[1]
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
except TCMSEnvProperty.DoesNotExist, error:
ajax_response['response'] = error[1]
ajax_response['rc'] = 1
return HttpResponse(json_dumps(ajax_response))
# Actions of remove properties
if user_action == 'del':
if not has_perm('management.delete_tcmsenvproperty'):
message = 'Permission denied'
property_ids = request.REQUEST.getlist('id')
if has_perm('management.delete_tcmsenvproperty') and property_ids:
try:
filter = TCMSEnvGroupPropertyMap.objects.filter
env_group_property_map = filter(property__id__in=property_ids)
env_group_property_map and env_group_property_map.delete()
env_group_value_map = filter(property__id__in=property_ids)
env_group_value_map and env_group_value_map.delete()
except:
pass
try:
env_properties = TCMSEnvProperty.objects.filter(
id__in=property_ids)
property_values = '\', \''.join(
env_properties.values_list('name', flat=True))
message = 'Remove test properties %s successfully.' % \
property_values
env_properties.delete()
except TCMSEnvProperty.DoesNotExist as error:
message = error[1]
# Actions of modifying properties
if user_action == 'modify':
if not has_perm('management.change_tcmsenvproperty'):
message = 'Permission denied'
property_ids = request.REQUEST.getlist('id')
if has_perm('management.change_tcmsenvproperty') and property_ids:
try:
env_properties = TCMSEnvProperty.objects.filter(
id__in=property_ids)
if request.REQUEST.get('status') in ['0', '1']:
for env_property in env_properties:
env_property.is_active = int(request.REQUEST['status'])
env_property.save()
property_values = '\', \''.join(
env_properties.values_list('name', flat=True))
message = 'Modify test properties status \'%s\' ' \
'successfully.' % property_values
else:
message = 'Argument illegal'
except TCMSEnvProperty.DoesNotExist as error:
message = error[1]
try:
filter = TCMSEnvGroupPropertyMap.objects.filter
env_group_property_map = filter(property__id__in=property_ids)
env_group_property_map and env_group_property_map.delete()
env_group_value_map = filter(property__id__in=property_ids)
env_group_value_map and env_group_value_map.delete()
except:
pass
if request.is_ajax():
ajax_response['rc'] = 1
ajax_response['response'] = 'Unknown action'
return HttpResponse(json_dumps(ajax_response))
context_data = {
'message': message,
'properties': TCMSEnvProperty.objects.all().order_by('-is_active')
}
return render_to_response(template_name, context_data,
context_instance=RequestContext(request))
def environment_property_values(request):
"""
List values of property
"""
template_name = 'environment/ajax/property_values.html'
message = ''
duplicated_property_value = []
if not request.REQUEST.get('property_id'):
return HttpResponse('Property ID must be specified')
try:
qs = TCMSEnvProperty.objects.select_related('value')
property = qs.get(id=request.REQUEST['property_id'])
except TCMSEnvProperty.DoesNotExist, error:
return HttpResponse(error)
user_action = request.REQUEST.get('action')
if user_action == 'add' and request.REQUEST.get('value'):
if not request.user.has_perm('management.add_tcmsenvvalue'):
return HttpResponse('Permission denied')
for value in request.REQUEST['value'].split(','):
try:
property.value.create(value=value)
except IntegrityError, error:
if error[1].startswith('Duplicate'):
duplicated_property_value.append(value)
if user_action == 'edit' and request.REQUEST.get('id'):
if not request.user.has_perm('management.change_tcmsenvvalue'):
return HttpResponse('Permission denied')
try:
property_value = property.value.get(id=request.REQUEST['id'])
property_value.value = request.REQUEST.get('value',
property_value.value)
try:
property_value.save()
except IntegrityError, error:
if error[1].startswith('Duplicate'):
duplicated_property_value.append(property_value.value)
except TCMSEnvValue.DoesNotExist, error:
return HttpResponse(error[1])
if user_action == 'modify' and request.REQUEST.get('id'):
if not request.user.has_perm('management.change_tcmsenvvalue'):
return HttpResponse('Permission denied')
values = property.value.filter(id__in=request.REQUEST.getlist('id'))
if request.REQUEST.get('status') in ['0', '1']:
for value in values:
value.is_active = int(request.REQUEST['status'])
value.save()
else:
return HttpResponse('Argument illegal')
if duplicated_property_value:
message = 'Value(s) named \'%s\' already exists in this property, ' \
'please select another name.' % '\', \''.join(
duplicated_property_value)
values = property.value.all()
context_data = {
'property': property,
'values': values,
'message': message,
}
return render_to_response(template_name, context_data,
context_instance=RequestContext(request))
|
ShaolongHu/Nitrate
|
tcms/management/views.py
|
Python
|
gpl-2.0
| 18,151
|
# pylint: disable=missing-docstring,too-few-public-methods,invalid-name
from collections import defaultdict
class A:
pass
class B:
pass
A.__class__ = B
A.__class__ = str
A.__class__ = float
A.__class__ = dict
A.__class__ = set
A.__class__ = defaultdict
A.__class__ = defaultdict(str) # [invalid-class-object]
A.__class__ = 1 # [invalid-class-object]
|
PyCQA/pylint
|
tests/functional/i/invalid/invalid_class_object.py
|
Python
|
gpl-2.0
| 382
|
'''app.notify.admin'''
import os
from bson import json_util
from flask import g, request
from .. import get_keys
from logging import getLogger
log = getLogger(__name__)
#-------------------------------------------------------------------------------
def update_agency_conf():
log.info('updating %s with value %s', request.form['field'], request.form['value'])
'''old_value = g.db['groups'].find_one({'name':user['agency']})[request.form['field']]
if type(old_value) != type(request.form['value']):
log.error('type mismatch')
return False
'''
try:
r = g.db['groups'].update_one(
{'name':g.group},
{'$set':{request.form['field']:request.form['value']}}
)
    except Exception as e:
        log.error(str(e))
        return False
    return True
|
SeanEstey/Bravo
|
app/notify/admin.py
|
Python
|
gpl-2.0
| 803
|
#!/usr/bin/env python3
import unittest
from trans import g2pbr
class PhonTest(unittest.TestCase):
def setUp(self):
self.test_words = {
'financiamento': 'finãsiamẽto',
}
self.phon = g2pbr.Phon(rules='trans/rules.pt')
def test_words(self):
for w in self.test_words:
self.assertEqual(self.phon.run([w])[0], self.test_words[w])
if __name__ == '__main__':
unittest.main()
|
shoeki/ling
|
tests.py
|
Python
|
gpl-2.0
| 462
|
#!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule ConfigureSystemAuthentication
@author: ekkehard j. koch
@author: Breen Malmberg
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.ConfigureSystemAuthentication import ConfigureSystemAuthentication
class zzzTestRuleConfigureSystemAuthentication(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = ConfigureSystemAuthentication(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def test_account_locking_detection(self):
''' '''
self.rule.report()
result = bool(self.rule.usingpamfail or self.rule.usingpamtally2)
self.assertTrue(result)  # if False, the system is using neither, in which case the rule will be in trouble
def test_editor_creation(self):
''' '''
result1 = False
result2 = False
self.rule.report()
if self.rule.editor1:
result1 = True
if self.rule.editor2:
result2 = True
self.assertTrue(result1)
self.assertTrue(result2)
def setConditionsForRule(self):
'''Configure system for the unit test
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
success = True
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
'''check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
'''check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
unittest.main()
|
CSD-Public/stonix
|
src/tests/rules/unit_tests/zzzTestRuleConfigureSystemAuthentication.py
|
Python
|
gpl-2.0
| 4,993
|
# coding: utf-8
import pygame, sys
from pygame.locals import *
import script
import menu
wind_size = 568,500
back = pygame.image.load('image/tela_game_over.jpg')
reiniciar = pygame.image.load('image/botoes/reiniciar.png')
voltar = pygame.image.load('image/botoes/voltar.png')
screen = pygame.display.set_mode(wind_size)
pos2 = []
pygame.init()
def reinicio():
zera_pontos = ''
pos2 = list((pygame.mouse.get_pos()))
click_mouse = pygame.mouse.get_pressed()
screen.blit(back, (0,0))
screen.blit(reiniciar, (340, 185))
screen.blit(voltar, (340, 279))
# display the score from the last match
pontuacao = open("pontos.txt","r").read()
font = pygame.font.Font("fonts/font2.ttf", 30)
mensagem = font.render(pontuacao, True, (0,0,0))
screen.blit(mensagem, [450,110])
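    # Hit box of the "reiniciar" (restart) button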
if pos2[0] >= 281 and pos2[0] <= 437 and pos2[1] >= 187 and pos2[1] <= 269:
if click_mouse[0] == 1:
zera_pontos = open("pontos.txt","w").write(str(0))  # reset the last score
script.main()
sys.exit()
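    # Hit box of the "voltar" (back to menu) button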
if pos2[0] >= 282 and pos2[0] <= 438 and pos2[1] >= 282 and pos2[1] <= 368:
if click_mouse[0] == 1:
menu.main()
sys.exit()
pygame.display.flip()
|
anapaulabarros/flyingbee
|
reinicio.py
|
Python
|
gpl-2.0
| 1,246
|
from csv_utils import *
import os  # used below for os.path and os.mkdir
import pylab as p
import sys
directory="/home/jspaleta/scratch/king_salmon_vnadata_sept_9_card7redo"
plotdir="ksr_paired_recv_path_plots"
radar="KSR"
plot_directory=os.path.join(directory,plotdir)
if not os.path.exists(plot_directory): os.mkdir(plot_directory)
colors={0:"red",1:"blue",2:"black",3:"green",4:"cyan",5:"yellow"}
for bmnum in range(16):
p.figure(200+bmnum)
p.clf()
p.figure(300+bmnum)
p.clf()
p.figure(400+bmnum)
p.clf()
p.figure(500+bmnum)
p.clf()
for bmnum in range(16):
last_main=None
last_interf=None
p.figure(104)
p.clf()
for card in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]:
data_interf=None
data_main=None
if card in [7,8,9,10]:
data_interf=csv_data()
data_interf.card=card+10
data_interf.beam=bmnum
read_csv(directory,data_interf)
else:
data_interf=None
data_main=csv_data()
data_main.card=card
data_main.beam=bmnum
read_csv(directory,data_main)
freqs=p.array(data_main.freqs)
df=freqs[1]-freqs[0]
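        # Indices of the 10, 12 and 15 MHz bins in the VNA frequency sweep.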
f10_index=int((10E6-freqs[0])/df)+1
f12_index=int((12E6-freqs[0])/df)+1
f15_index=int((15E6-freqs[0])/df)+1
main_tdelay=p.array(data_main.tdelay)
main_ephase=p.array(data_main.ephase)
main_ephase_slope=(main_ephase[700]-main_ephase[500])/(freqs[700]-freqs[500])
main_ephase_offset=main_ephase[0]-main_ephase_slope*freqs[0]
print main_ephase_offset,main_ephase_slope,main_ephase[0],freqs[0]
main_phase_from_tdelay=-main_tdelay*freqs*360.0
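        # Group delay from the measured phase slope: tdelay = -(1/360) * dphase/dfreq.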
main_ephase_diff=p.diff(data_main.ephase)
freq_diff=p.diff(data_main.freqs)
main_ephase_tdelay=-main_ephase_diff/360.0/freq_diff
if last_main is not None:
main_nearest_pair_phase_diff=p.array(data_main.ephase)-p.array(last_main.ephase)
if card in [7,8,9,10]:
interf_tdelay=p.array(data_interf.tdelay)
diff_tdelay=interf_tdelay-main_tdelay
phase_diff=p.array(data_interf.ephase)-p.array(data_main.ephase)
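            # Wrap the main/interferometer phase difference into (-180, 180] degrees.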
phase_diff=(phase_diff % 360.0)
phase_diff=p.array([(ph > 180) * -360 + ph for ph in phase_diff])
phase_diff_diff=p.diff(phase_diff)
phase_diff_tdelay=-(phase_diff_diff/360.0)/freq_diff
interf_ephase_diff=p.diff(data_interf.ephase)
interf_ephase_tdelay=-interf_ephase_diff/360.0/freq_diff
diff_ephase_tdelay=interf_ephase_tdelay-main_ephase_tdelay
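            # Boxcar-smooth the pair time delay over a +/-40-sample window
            # (edge samples average over however many points exist).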
smooth_phase_diff_tdelay=p.zeros_like(phase_diff_tdelay)
sum_count=p.zeros_like(phase_diff_tdelay)
for i in xrange(len(phase_diff_tdelay)):
for j in xrange(40):
if i+j < len(phase_diff_tdelay):
smooth_phase_diff_tdelay[i]+=phase_diff_tdelay[i+j]
sum_count[i]+=1
if i-j >= 0:
smooth_phase_diff_tdelay[i]+=phase_diff_tdelay[i-j]
sum_count[i]+=1
smooth_phase_diff_tdelay=smooth_phase_diff_tdelay/sum_count
if last_interf is not None:
interf_nearest_pair_phase_diff=p.array(data_interf.ephase)-p.array(last_interf.ephase)
p.figure(200+bmnum)
p.plot(freqs*1E-6,main_tdelay*1E9,color=colors[ card % 6 ],label="Card %02d" % (card) )
if card in [7,8,9,10]:
p.figure(300+bmnum)
p.plot(freqs*1E-6,interf_tdelay*1E9,color=colors[card % 6 ],label="Card %02d" % (card+10) )
if last_main is not None:
p.figure(400+bmnum)
p.plot(freqs*1E-6,main_nearest_pair_phase_diff,color=colors[ card % 6 ],label="Card %02d-%02d" % (card,card-1) )
if last_interf is not None:
if card in [7,8,9,10]:
p.figure(500+bmnum)
p.plot(freqs*1E-6,interf_nearest_pair_phase_diff,color=colors[ card % 6 ],label="Card %02d-%02d" % (card+10,card-1+10) )
if card in [7,8,9,10]:
p.figure(100)
p.clf()
p.grid(True)
# p.plot(freqs[0:-1]*1E-6,diff_ephase_tdelay*1E9,color="black")
p.plot(freqs*1E-6,diff_tdelay*1E9,color="black",label="Cards: %02d-%02d" % (card,card+10))
# p.plot(freqs[0:-1]*1E-6,smooth_phase_diff_tdelay*1E9,color="black",label="Calculated from Diff of VNA Phase Measurement")
# p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((-40,40))
p.xlabel("Freq [MHz]")
p.ylabel("tdiff [nsec]")
p.title("%s Recv Path Time Delay Difference\n Card %02d and Card %02d Beam %d" % \
(radar,data_main.card,data_interf.card,data_main.beam))
figfile=os.path.join(plot_directory,"tdiff_c%02d-c%02d_b%02d.png" % (card,card+10,bmnum))
p.savefig(figfile)
p.figure(102)
p.clf()
p.grid(True)
p.plot(freqs*1E-6,phase_diff,color="black",label="Phase Diff")
p.plot(freqs*1E-6,data_main.phase,color="red",label="Card %02d" % (card) )
p.plot(freqs*1E-6,data_interf.phase,color="blue",label="Card %02d" % (card+10) )
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((-200,200))
p.figtext(0.15,0.85,"10 MHz: %3.1f" % (phase_diff[f10_index]),backgroundcolor="white")
p.figtext(0.35,0.85,"12 MHz: %3.1f" % (phase_diff[f12_index]),backgroundcolor="white")
p.figtext(0.55,0.85,"15 MHz: %3.1f" % (phase_diff[f15_index]),backgroundcolor="white")
p.xlabel("Freq [MHz]")
p.ylabel("phase [deg]")
p.title("%s Recv Path Phase Difference\n Card %02d and Card %02d Beam %d" % \
(radar,data_main.card,data_interf.card,data_main.beam))
figfile=os.path.join(plot_directory,"phase_diff_c%02d-c%02d_b%02d.png" % (card,card+10,bmnum))
p.savefig(figfile)
p.figure(103)
p.clf()
p.grid(True)
p.plot(freqs*1E-6,main_tdelay*1E9,color="red",label="Card %02d" % (card) )
p.plot(freqs*1E-6,interf_tdelay*1E9,color="blue",label="Card %02d" % (card+10) )
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((0,1000))
p.xlabel("Freq [MHz]")
p.ylabel("Group Delay [nsec]")
p.title("%s Recv Path Group Delay Comparison\n Card %02d and Card %02d Beam %d" % \
(radar,data_main.card,data_interf.card,data_main.beam))
figfile=os.path.join(plot_directory,"group_delay_c%02d-c%02d_b%02d.png" % (card,card+10,bmnum))
p.savefig(figfile)
p.figure(104)
# p.plot(freqs[0:-1]*1E-6,diff_ephase_tdelay*1E9,color="black")
p.plot(freqs*1E-6,diff_tdelay*1E9,color=colors[card % 6],label="Cards: %02d-%02d" % (card,card+10))
# p.plot(freqs[0:-1]*1E-6,smooth_phase_diff_tdelay*1E9,color="black",label="Calculated from Diff of VNA Phase Measurement")
p.figure(105)
p.clf()
p.grid(True)
p.plot(freqs*1E-6,main_ephase-main_ephase_offset,color="red",label="Card %02d ephase" % (card) )
p.plot(freqs*1E-6,main_ephase_slope*freqs,color="green",label="Card %02d ephase from slope" % (card) )
p.plot(freqs*1E-6,main_phase_from_tdelay,color="blue",label="Card %02d ephase from tdelay" % (card) )
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((-5000,0))
p.xlabel("Freq [MHz]")
p.ylabel("Phase [deg]")
p.title("%s Recv Path Ephase Comparison\n Card %02d Beam %d" % \
(radar,data_main.card,data_main.beam))
figfile=os.path.join(plot_directory,"main_ephase_c%02d_b%02d.png" % (card,bmnum))
p.savefig(figfile)
last_main=data_main
last_interf=data_interf
p.figure(200+bmnum)
p.grid(True)
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((0,1000))
p.xlabel("Freq [MHz]")
p.ylabel("Group Delay [nsec]")
p.title("%s Main Array Group Delay Comparison\n Beam %d" % \
(radar,data_main.beam))
figfile=os.path.join(plot_directory,"main_beam_group_delay_b%02d.png" % (bmnum))
p.savefig(figfile)
p.figure(300+bmnum)
p.grid(True)
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((0,1000))
p.xlabel("Freq [MHz]")
p.ylabel("Group Delay [nsec]")
p.title("%s Interf Array Group Delay Comparison\n Beam %d" % \
(radar,data_main.beam))
figfile=os.path.join(plot_directory,"interf_beam_group_delay_b%02d.png" % (bmnum))
p.savefig(figfile)
p.figure(400+bmnum)
p.grid(True)
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((-360,360))
p.xlabel("Freq [MHz]")
p.ylabel("Phase difference [deg]")
p.title("%s Main Array Nearest Neighbor Phase Diff Comparison\n Beam %d" % \
(radar,data_main.beam))
figfile=os.path.join(plot_directory,"main_beam_pair_phase_diff_b%02d.png" % (bmnum))
p.savefig(figfile)
p.figure(500+bmnum)
p.grid(True)
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim(-360,360)
p.xlabel("Freq [MHz]")
p.ylabel("Phase difference [deg]")
p.title("%s Interf Array Nearest Neighbor Phase Diff Comparison\n Beam %d" % \
(radar,data_main.beam))
figfile=os.path.join(plot_directory,"interf_beam_pair_phase_diff_b%02d.png" % (bmnum))
p.savefig(figfile)
p.figure(104)
p.grid(True)
p.legend(loc=4)
ax=p.gca()
ax.set_xlim((8,20))
ax.set_ylim((-40,40))
p.xlabel("Freq [MHz]")
p.ylabel("tdiff [nsec]")
p.title("%s Recv Path Time Delay Difference\nBeam %d" % \
(radar,data_main.beam))
figfile=os.path.join(plot_directory,"beam_tdiff_b%02d.png" % (bmnum))
p.savefig(figfile)
# p.show()
|
loxodes/SuperDARN_Hardware_Tools
|
kingsalmon_scripts/ksr_comparison_plots.py
|
Python
|
gpl-2.0
| 9,126
|
# Copyright 2008-2013 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''Package with source formats for pages.
Each module in zim.formats should contains exactly one subclass of
DumperClass and exactly one subclass of ParserClass
(optional for export formats). These can be loaded by L{get_parser()}
and L{get_dumper()} respectively. The requirement to have exactly one
subclass per module means you can not import other classes that derive
from these base classes directly into the module.
For format modules it is safe to import '*' from this module.
Parse tree structure
====================
Parse trees are built using the (c)ElementTree module (included in
python 2.5 as xml.etree.ElementTree). It is basically an xml structure
supporting a subset of "html like" tags.
Supported tags:
- page root element for grouping paragraphs
- p for paragraphs
- h for heading, level attribute can be 1..6
- pre for verbatim paragraphs (no further parsing in these blocks)
- em for emphasis, rendered italic by default
- strong for strong emphasis, rendered bold by default
- mark for highlighted text, rendered with background color or underlined
- strike for text that is removed, usually rendered as strike through
- code for inline verbatim text
- ul for bullet and checkbox lists
- ol for numbered lists
- li for list items
- link for links, attribute href gives the target
- img for images, attributes src, width, height and optionally href and alt
- type can be used to control plugin functionality, e.g. type=equation
- table for tables, attributes
* aligns - comma separated values: right,left,center
* wraps - 0 for not wrapped, 1 for auto-wrapped line display
- thead for table header row
- th for table header cell
- trow for table row
- td for table data cell
Nesting rules:
- paragraphs, list items, table cells & headings can contain all inline elements
- inline formats can contain other inline formats as well as links and tags
- code and pre cannot contain any other elements
Unlike html we respect line breaks and other whitespace as is.
When rendering as html use the "white-space: pre" CSS definition to
get the same effect.
If a page starts with a h1 this heading is considered the page title,
else we can fall back to the page name as title.
NOTE: To avoid confusion: "headers" refers to meta data, usually in
the form of rfc822 headers at the top of a page. But "heading" refers
to a title or subtitle in the document.
'''
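# A minimal sketch of the tree structure described above (illustrative only;
# the actual root tag used internally is given by FORMATTEDTEXT below):
#
#   <page>
#     <h level="1">Page title</h>
#     <p>Some <strong>bold</strong> text with a
#     <link href="OtherPage">link</link>.</p>
#   </page>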
import re
import string
import itertools
import logging
import types
from zim.fs import Dir, File
from zim.parsing import link_type, is_url_re, is_www_link_re, \
url_encode, url_decode, URL_ENCODE_READABLE, URL_ENCODE_DATA
from zim.parser import Builder
from zim.config import data_file, ConfigDict
from zim.plugins import PluginManager
import zim.plugins
from functools import reduce
logger = logging.getLogger('zim.formats')
# Needed to determine RTL, but may not be available
# if gtk bindings are not installed
try:
from gi.repository import Pango
except:
Pango = None
logger.warn('Could not load pango - RTL scripts may look bad')
import xml.etree.ElementTree # needed to compile with cElementTree
try:
import xml.etree.cElementTree as ElementTreeModule
except: #pragma: no cover
import xml.etree.ElementTree as ElementTreeModule
EXPORT_FORMAT = 1
IMPORT_FORMAT = 2
NATIVE_FORMAT = 4
TEXT_FORMAT = 8 # Used for "Copy As" menu - these all provide "text/plain" mimetype
UNCHECKED_BOX = 'unchecked-box'
CHECKED_BOX = 'checked-box'
XCHECKED_BOX = 'xchecked-box'
MIGRATED_BOX = 'migrated-box'
TRANSMIGRATED_BOX = "transmigrated-box"
BULLET = '*' # FIXME make this 'bullet'
FORMATTEDTEXT = 'zim-tree'
FRAGMENT = 'zim-tree'
HEADING = 'h'
PARAGRAPH = 'p'
VERBATIM_BLOCK = 'pre' # should be same as verbatim
BLOCK = 'div'
IMAGE = 'img'
OBJECT = 'object'
BULLETLIST = 'ul'
NUMBEREDLIST = 'ol'
LISTITEM = 'li'
EMPHASIS = 'emphasis' # TODO change to "em" to be in line with html
STRONG = 'strong'
MARK = 'mark'
VERBATIM = 'code'
STRIKE = 'strike'
SUBSCRIPT = 'sub'
SUPERSCRIPT = 'sup'
LINK = 'link'
TAG = 'tag'
ANCHOR = 'anchor'
TABLE = 'table'
HEADROW = 'thead'
HEADDATA = 'th'
TABLEROW = 'trow'
TABLEDATA = 'td'
LINE = 'line'
BLOCK_LEVEL = (PARAGRAPH, HEADING, VERBATIM_BLOCK, BLOCK, OBJECT, IMAGE, LISTITEM, TABLE)
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
def increase_list_iter(listiter):
'''Get the next item in a list for a numbered list
E.g. if C{listiter} is C{"1"} this function returns C{"2"}, if it
is C{"a"} it returns C{"b"}.
@param listiter: the current item, either an integer number or
single letter
@returns: the next item, or C{None}
'''
try:
i = int(listiter)
return str(i + 1)
except ValueError:
try:
i = _letters.index(listiter)
return _letters[i + 1]
except ValueError: # listiter is not a letter
return None
except IndexError: # wrap to start of list
return _letters[0]
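# Illustrative behavior (derived from the implementation above):
#   increase_list_iter('1') -> '2'
#   increase_list_iter('a') -> 'b'
#   increase_list_iter('z') -> 'A'   # IndexError wraps to the start of _letters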
def convert_list_iter_letter_to_number(listiter):
'''Convert a "letter" numbered list to a digit numbered list
	Useful for export to formats that do not support letter lists.
	Both "A." and "a." convert to "1."; the assumption is that this function
	is used for the start iter only, not the whole list
'''
try:
i = int(listiter)
return listiter
except ValueError:
try:
i = _letters.index(listiter) + 1
i = i if i <= 26 else i % 26
return str(i)
except ValueError: # listiter is not a letter
return None
def encode_xml(text):
'''Encode text such that it can be used in xml
@param text: label text as string
@returns: encoded text
'''
	return text.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;').replace("'", '&apos;')
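# Illustrative behavior of the escaping above:
#   encode_xml('a < b & "c"') -> 'a &lt; b &amp; &quot;c&quot;'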
def list_formats(type):
if type == EXPORT_FORMAT:
return ['HTML', 'LaTeX', 'Markdown (pandoc)', 'RST (sphinx)']
elif type == TEXT_FORMAT:
return ['Text', 'Wiki', 'Markdown (pandoc)', 'RST (sphinx)']
else:
assert False, 'TODO'
def canonical_name(name):
# "HTML" -> html
# "Markdown (pandoc)" -> "markdown"
# "Text" -> "plain"
name = name.lower()
if ' ' in name:
name, _ = name.split(' ', 1)
if name == 'text':
return 'plain'
else:
return name
_aliases = {
'zim-wiki': 'wiki',
}
def get_format(name):
'''Returns the module object for a specific format.'''
# If this method is removed, class names in formats/*.py can be made more explicit
#~ print('DEPRECATED: get_format() is deprecated in favor if get_parser() and get_dumper()')
return get_format_module(name)
def get_format_module(name):
'''Returns the module object for a specific format
@param name: the format name
@returns: a module object
'''
name = _aliases.get(name, name)
return zim.plugins.get_module('zim.formats.' + canonical_name(name))
def get_parser(name, *arg, **kwarg):
'''Returns a parser object instance for a specific format
@param name: format name
@param arg: arguments to pass to the parser object
@param kwarg: keyword arguments to pass to the parser object
@returns: parser object instance (subclass of L{ParserClass})
'''
module = get_format_module(name)
klass = zim.plugins.lookup_subclass(module, ParserClass)
return klass(*arg, **kwarg)
def get_dumper(name, *arg, **kwarg):
'''Returns a dumper object instance for a specific format
@param name: format name
@param arg: arguments to pass to the dumper object
@param kwarg: keyword arguments to pass to the dumper object
@returns: dumper object instance (subclass of L{DumperClass})
'''
module = get_format_module(name)
klass = zim.plugins.lookup_subclass(module, DumperClass)
return klass(*arg, **kwarg)
def heading_to_anchor(name):
"""Derive an anchor name from a heading"""
name = re.sub(r'\s', '-', name.strip().lower())
return re.sub(r'[^\w\-_]', '', name)
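# Illustrative behavior:
#   heading_to_anchor('  My Heading!  ') -> 'my-heading'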
class ParseTree(object):
'''Wrapper for zim parse trees.'''
# No longer derives from ElementTree, internals are now private
# TODO, also remove etree args from init
# TODO, rename to FormattedText
def __init__(self, *arg, **kwarg):
self._etree = ElementTreeModule.ElementTree(*arg, **kwarg)
self._object_cache = {}
self.meta = DefinitionOrderedDict()
@property
def hascontent(self):
'''Returns True if the tree contains any content at all.'''
root = self._etree.getroot()
return root is not None and (
bool(list(root)) or (root.text and not root.text.isspace())
)
@property
def ispartial(self):
'''Returns True when this tree is a segment of a page
(like a copy-paste buffer).
'''
return self._etree.getroot().attrib.get('partial', False)
@property
def israw(self):
'''Returns True when this is a raw tree (which is representation
of TextBuffer, but not really valid).
'''
return self._etree.getroot().attrib.get('raw', False)
def _set_root_attrib(self, key, value):
self._etree.getroot().attrib[key] = value
def _get_root_attrib(self, key, default=None):
return self._etree.getroot().attrib.get(key, default)
def _pop_root_attrib(self, key, default=None):
return self._etree.getroot().attrib.pop(key, default)
def extend(self, tree):
# Do we need a deepcopy here ?
myroot = self._etree.getroot()
otherroot = tree._etree.getroot()
if otherroot.text:
children = list(myroot)
if children:
last = children[-1]
last.tail = (last.tail or '') + otherroot.text
else:
myroot.text = (myroot.text or '') + otherroot.text
for element in iter(otherroot):
myroot.append(element)
return self
__add__ = extend
def fromstring(self, string):
'''Set the contents of this tree from XML representation.'''
parser = ElementTreeModule.XMLParser()
parser.feed(string)
root = parser.close()
self._etree._setroot(root)
return self # allow ParseTree().fromstring(..)
def tostring(self):
'''Serialize the tree to a XML representation'''
from io import StringIO
# HACK: Force sorting of attrib - else change in python3.8 breaks test cases
# Ensure all attrib are string, else ElementTree fails
for element in self._etree.iter('*'):
myattrib = element.attrib.copy()
element.attrib.clear()
for key in sorted(myattrib.keys()):
element.attrib[key] = str(myattrib[key])
xml = StringIO()
xml.write("<?xml version='1.0' encoding='utf-8'?>\n")
ElementTreeModule.ElementTree.write(self._etree, xml, 'unicode')
return xml.getvalue()
def copy(self):
builder = ParseTreeBuilder(_parsetree_roundtrip=True)
self.visit(builder)
return builder.get_parsetree()
def iter_tokens(self):
from zim.tokenparser import TokenBuilder
tb = TokenBuilder()
self.visit(tb)
return iter(tb.tokens)
def iter_href(self, include_page_local_links=False, include_anchors=False):
'''Generator for links in the text
@param include_anchors: if C{False} remove the target location from the
link and only yield unique links to pages
@returns: yields a list of unique L{HRef} objects
'''
from zim.notebook.page import HRef # XXX
seen = set()
for elt in itertools.chain(
self._etree.iter(LINK),
self._etree.iter(IMAGE)
):
href = elt.attrib.get('href')
if not href or link_type(href) != 'page':
continue
try:
href_obj = HRef.new_from_wiki_link(href)
except ValueError:
continue
if not include_anchors:
if not href_obj.names:
continue # internal link within same page
elif href_obj.anchor:
href_obj.anchor = None
href = href_obj.to_wiki_link()
if href in seen:
continue
seen.add(href)
yield href_obj
def iter_tag_names(self):
'''Generator for tags in the page content
@returns: yields an unordered list of tag names
'''
seen = set()
for elt in self._etree.iter(TAG):
name = elt.text
if not name in seen:
seen.add(name)
yield name.lstrip('@')
def _get_heading_element(self, level=1):
root = self._etree.getroot()
children = list(root)
if root.text and not root.text.isspace():
return None
if children:
first = children[0]
if first.tag == 'h' and int(first.attrib['level']) >= level:
return first
return None
def get_heading_level(self):
heading_elem = self._get_heading_element()
if heading_elem is not None:
return int(heading_elem.attrib['level'])
else:
return None
def _elt_to_text(self, elt):
strings = [elt.text]
for e in elt:
strings.append(self._elt_to_text(e)) # recurs
strings.append(e.tail)
return ''.join(s for s in strings if s) # remove possible None values
def get_heading_text(self, level=1):
heading_elem = self._get_heading_element(level)
if heading_elem is not None:
return self._elt_to_text(heading_elem)
else:
return ""
def set_heading_text(self, text, level=1):
'''Set the first heading of the parse tree to 'text'. If the tree
already has a heading of the specified level or higher it will be
replaced. Otherwise the new heading will be prepended.
'''
heading = self._get_heading_element(level)
if heading is not None:
heading.text = text
for e in heading:
heading.remove(e)
else:
root = self._etree.getroot()
heading = ElementTreeModule.Element('h', {'level': level})
heading.text = text
heading.tail = '\n' + (root.text or '')
root.text = None
root.insert(0, heading)
def remove_heading(self, level=-1):
'''If the tree starts with a heading, remove it and any trailing
whitespace.
Will modify the tree.
@returns: a 2-tuple of text and heading level or C{(None, None)}
'''
root = self._etree.getroot()
roottext = root.text and not root.text.isspace()
children = list(root)
if children and not roottext:
first = children[0]
if first.tag == 'h':
mylevel = int(first.attrib['level'])
if level == -1 or mylevel <= level:
root.remove(first)
if first.tail and not first.tail.isspace():
root.text = first.tail # Keep trailing text
def cleanup_headings(self, offset=0, max=6):
'''Change the heading levels throughout the tree. This makes sure that
all headings are nested directly under their parent (no gaps in the
levels of the headings). Also you can set an offset for the top level
and a max depth.
'''
path = []
for heading in self._etree.iter('h'):
level = int(heading.attrib['level'])
# find parent header in path using old level
while path and path[-1][0] >= level:
path.pop()
if not path:
newlevel = offset + 1
else:
newlevel = path[-1][1] + 1
if newlevel > max:
newlevel = max
heading.attrib['level'] = newlevel
path.append((level, newlevel))
def resolve_images(self, notebook=None, path=None):
'''Resolves the source files for all images relative to a page path and
adds a '_src_file' attribute to the elements with the full file path.
'''
if notebook is None:
for element in self._etree.iter('img'):
filepath = element.attrib['src']
element.attrib['_src_file'] = File(filepath)
else:
for element in self._etree.iter('img'):
filepath = element.attrib['src']
element.attrib['_src_file'] = notebook.resolve_file(filepath, path)
def unresolve_images(self):
'''Undo effect of L{resolve_images()}, mainly intended for
testing.
'''
for element in self._etree.iter('img'):
if '_src_file' in element.attrib:
element.attrib.pop('_src_file')
def encode_urls(self, mode=URL_ENCODE_READABLE):
'''Calls encode_url() on all links that contain urls.
See zim.parsing for details. Modifies the parse tree.
'''
for link in self._etree.iter('link'):
href = link.attrib['href']
if href and is_url_re.match(href) or is_www_link_re.match(href):
link.attrib['href'] = url_encode(href, mode=mode)
if link.text == href:
link.text = link.attrib['href']
def decode_urls(self, mode=URL_ENCODE_READABLE):
'''Calls decode_url() on all links that contain urls.
See zim.parsing for details. Modifies the parse tree.
'''
for link in self._etree.iter('link'):
href = link.attrib['href']
if href and is_url_re.match(href) or is_www_link_re.match(href):
link.attrib['href'] = url_decode(href, mode=mode)
if link.text == href:
link.text = link.attrib['href']
def count(self, text):
'''Returns the number of occurrences of 'text' in this tree.'''
count = 0
for element in self._etree.iter():
if element.text:
count += element.text.count(text)
if element.tail:
count += element.tail.count(text)
return count
def countre(self, regex):
'''Returns the number of matches for a regular expression
in this tree.
'''
count = 0
for element in self._etree.iter():
if element.text:
newstring, n = regex.subn('', element.text)
count += n
if element.tail:
newstring, n = regex.subn('', element.tail)
count += n
return count
def get_ends_with_newline(self):
'''Checks whether this tree ends in a newline or not'''
return self._get_element_ends_with_newline(self._etree.getroot())
def _get_element_ends_with_newline(self, element):
if element.tail:
return element.tail.endswith('\n')
elif element.tag in ('li', 'h'):
return True # implicit newline
else:
children = list(element)
if children:
return self._get_element_ends_with_newline(children[-1]) # recurs
elif element.text:
return element.text.endswith('\n')
else:
return False # empty element like image
def visit(self, visitor):
'''Visit all nodes of this tree
@note: If the visitor modifies the attrib dict on nodes, this
will modify the tree.
@param visitor: a L{Visitor} or L{Builder} object
'''
try:
self._visit(visitor, self._etree.getroot())
except VisitorStop:
pass
def _visit(self, visitor, node):
try:
if len(node): # Has children
visitor.start(node.tag, node.attrib)
if node.text:
visitor.text(node.text)
for child in node:
self._visit(visitor, child) # recurs
if child.tail:
visitor.text(child.tail)
visitor.end(node.tag)
else:
visitor.append(node.tag, node.attrib, node.text)
except VisitorSkip:
pass
def find(self, tag):
'''Find first occurrence of C{tag} in the tree
@returns: a L{Node} object or C{None}
'''
for elt in self.findall(tag):
return elt # return first
else:
return None
def findall(self, tag):
'''Find all occurrences of C{tag} in the tree
@param tag: tag name
@returns: yields L{Node} objects
'''
for elt in self._etree.iter(tag):
yield Element.new_from_etree(elt)
def replace(self, tags, func):
'''Modify the tree by replacing all occurrences of C{tag}
by the return value of C{func}.
@param tags: tag name, or list of tag names
@param func: function to generate replacement values.
Function will be called as::
func(node)
Where C{node} is a L{Node} object representing the subtree.
If the function returns another L{Node} object or modifies
C{node} and returns it, the subtree will be replaced by this
new node.
If the function raises L{VisitorSkip} the replace is skipped.
If the function raises L{VisitorStop} the replacement of all
nodes will stop.
'''
if not isinstance(tags, (tuple, list)):
tags = (tags,)
try:
self._replace(self._etree.getroot(), tags, func)
except VisitorStop:
pass
def _replace(self, elt, tags, func):
# Two-step replace in order to do items in order
# of appearance.
replacements = []
for i, child in enumerate(elt):
if child.tag in tags:
try:
replacement = func(Element.new_from_etree(child))
except VisitorSkip:
pass
else:
replacements.append((i, child, replacement))
elif len(child):
self._replace(child, tags, func) # recurs
else:
pass
if replacements:
self._do_replace(elt, replacements)
def _do_replace(self, elt, replacements):
offset = 0 # offset due to replacements
for i, child, node in replacements:
i += offset
if node is None:
# Remove element
tail = child.tail
elt.remove(child)
if tail:
self._insert_text(elt, i, tail)
offset -= 1
elif isinstance(node, Element):
# Just replace elements
newchild = self._node_to_etree(node)
newchild.tail = child.tail
elt[i] = newchild
elif isinstance(node, DocumentFragment):
# Insert list of elements and text
tail = child.tail
elt.remove(child)
offset -= 1
for item in node:
if isinstance(item, str):
self._insert_text(elt, i, item)
else:
assert isinstance(item, Element)
elt.insert(i, self._node_to_etree(item))
i += 1
offset += 1
if tail:
self._insert_text(elt, i, tail)
else:
raise TypeError('BUG: invalid replacement result')
@staticmethod
def _node_to_etree(node):
builder = ParseTreeBuilder()
node.visit(builder)
return builder._b.close()
def _insert_text(self, elt, i, text):
if i == 0:
if elt.text:
elt.text += text
else:
elt.text = text
else:
prev = elt[i - 1]
if prev.tail:
prev.tail += text
else:
prev.tail = text
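# A minimal usage sketch (not part of the original module) for the
# ParseTree.replace() API documented above. It assumes "tree" is an
# existing ParseTree; Element and VisitorSkip are defined later in this
# file and are resolved at call time.
def _demo_replace(tree):
    def upper(node):
        # Leave elements that carry attributes untouched
        if node.attrib:
            raise VisitorSkip
        # Replace the subtree with an upper-cased copy
        return Element(node.tag, node.attrib, node.gettext().upper())
    tree.replace('strong', upper)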
class VisitorStop(Exception):
'''Exception to be raised to cancel a visitor action'''
pass
class VisitorSkip(Exception):
'''Exception to be raised when the visitor should skip a leaf node
and not descend into it.
'''
pass
class Visitor(object):
'''Conceptual opposite of a builder, but with same API.
Used to walk nodes in a parsetree and call callbacks for each node.
See e.g. L{ParseTree.visit()}.
'''
def start(self, tag, attrib=None):
'''Start formatted region
Visitor objects can raise two exceptions in this method
to influence the tree traversal:
1. L{VisitorStop} will cancel the current parsing, but without
raising an error. So code implementing a visit method should
catch this.
2. L{VisitorSkip} can be raised when the visitor wants to skip
a node, and should prevent the implementation from further
descending into this node
@note: If the visitor modifies the attrib dict on nodes, this
will modify the tree. If this is not intended, the implementation
needs to take care to copy the attrib to break the reference.
@param tag: the tag name
@param attrib: optional dict with attributes
@implementation: optional for subclasses
'''
pass
def text(self, text):
'''Append text
@param text: text to be appended as string
@implementation: optional for subclasses
'''
pass
def end(self, tag):
'''End formatted region
@param tag: the tag name
@raises AssertionError: when tag does not match current state
@implementation: optional for subclasses
'''
pass
def append(self, tag, attrib=None, text=None):
'''Convenience function to open a tag, append text and close
it immediately.
Can raise L{VisitorStop} or L{VisitorSkip}, see C{start()}
for the conditions.
@param tag: the tag name
@param attrib: optional dict with attributes
@param text: formatted text
@implementation: optional for subclasses, default implementation
calls L{start()}, L{text()}, and L{end()}
'''
self.start(tag, attrib)
if text is not None:
self.text(text)
self.end(tag)
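# Illustrative sketch (not in the original module): a minimal Visitor
# subclass that counts occurrences of one tag and aborts early via
# VisitorStop once a limit is reached. Counting in start() is enough,
# because the default append() implementation calls start() as well.
class _TagCounter(Visitor):
    def __init__(self, tag, limit=None):
        self.tag = tag
        self.limit = limit
        self.count = 0

    def start(self, tag, attrib=None):
        if tag == self.tag:
            self.count += 1
            if self.limit is not None and self.count >= self.limit:
                raise VisitorStop  # cancels the traversal without an error

# Usage, assuming "tree" is a ParseTree:
#   counter = _TagCounter('link')
#   tree.visit(counter)
#   counter.count  # number of 'link' elements seen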
class ParseTreeBuilder(Builder):
'''Builder object that builds a L{ParseTree}'''
def __init__(self, partial=False, _parsetree_roundtrip=False):
self.partial = partial
self._b = ElementTreeModule.TreeBuilder()
self.stack = [] #: keeps track of current open elements
self._last_char = None
self._parsetree_roundtrip = _parsetree_roundtrip
def get_parsetree(self):
'''Returns the constructed L{ParseTree} object.
Can only be called once, after calling this method the object
can not be re-used.
'''
root = self._b.close()
if self.partial:
root.attrib['partial'] = True
return zim.formats.ParseTree(root)
def start(self, tag, attrib=None):
attrib = attrib.copy() if attrib is not None else {}
self._b.start(tag, attrib)
self.stack.append(tag)
if tag in BLOCK_LEVEL:
self._last_char = None
def text(self, text):
self._last_char = text[-1]
# FIXME hack for backward compat
if self.stack and self.stack[-1] in (HEADING, LISTITEM):
text = text.strip('\n')
self._b.data(text)
def end(self, tag):
if tag != self.stack[-1]:
raise AssertionError('Unmatched tag closed: %s' % tag)
if tag in BLOCK_LEVEL and not self._parsetree_roundtrip:
if self._last_char is not None and not self.partial:
#~ assert self._last_char == '\n', 'Block level text needs to end with newline'
if self._last_char != '\n' and tag not in (HEADING, LISTITEM):
self._b.data('\n')
# FIXME check for HEADING LISTITEM for backward compat
# TODO if partial only allow missing \n at end of tree,
# delay message and trigger if not followed by get_parsetree ?
self._b.end(tag)
self.stack.pop()
# FIXME hack for backward compat
if tag == HEADING and not self._parsetree_roundtrip:
self._b.data('\n')
self._last_char = None
def append(self, tag, attrib=None, text=None):
attrib = attrib.copy() if attrib is not None else {}
if tag in BLOCK_LEVEL:
if text and not text.endswith('\n'):
text += '\n'
# FIXME hack for backward compat
if text and tag in (HEADING, LISTITEM):
text = text.strip('\n')
self._b.start(tag, attrib)
if text:
self._b.data(text)
self._b.end(tag)
# FIXME hack for backward compat
if tag == HEADING and not self._parsetree_roundtrip:
self._b.data('\n')
self._last_char = None
count_eol_re = re.compile(r'\n+\Z')
split_para_re = re.compile(r'((?:^[ \t]*\n){2,})', re.M)
class OldParseTreeBuilder(object):
'''This class supplies an alternative for xml.etree.ElementTree.TreeBuilder
which cleans up the tree on the fly while building it. The main use
is to normalize the tree that is produced by the editor widget, but it can
also be used on other "dirty" interfaces.
This builder takes care of the following issues:
- ~~Inline tags ('emphasis', 'strong', 'h', etc.) can not span multiple lines~~
(refactored out to `TextBuffer.get_parsetree()`)
- Tags can not contain only whitespace
- Tags can not be empty (with the exception of the 'img' tag)
- There should be an empty line before each 'h', 'p' or 'pre'
(with the exception of the first tag in the tree)
- The 'p' and 'pre' elements should always end with a newline ('\\n')
- Each 'p', 'pre' and 'h' should be postfixed with a newline ('\\n')
(as a results 'p' and 'pre' are followed by an empty line, the
'h' does not end in a newline itself, so it is different)
- Newlines ('\\n') after a <li> element are removed (optional)
- The element '_ignore_' is silently ignored
'''
## TODO TODO this also needs to be based on Builder ##
def __init__(self, remove_newlines_after_li=True):
assert remove_newlines_after_li, 'TODO'
self._stack = [] # stack of elements for open tags
self._last = None # last element opened or closed
self._data = [] # buffer with data
self._tail = False # True if we are after an end tag
self._seen_eol = 2 # track line ends on flushed data
# starts with "2" so check is ok for first top level element
def start(self, tag, attrib=None):
if tag == '_ignore_':
return self._last
elif tag == 'h':
self._flush(need_eol=2)
elif tag in ('p', 'pre'):
self._flush(need_eol=1)
else:
self._flush()
#~ print('START', tag)
if tag == 'h':
if not (attrib and 'level' in attrib):
logger.warn('Missing "level" attribute for heading')
attrib = attrib or {}
attrib['level'] = 1
elif tag == 'link':
if not (attrib and 'href' in attrib):
logger.warn('Missing "href" attribute for link')
attrib = attrib or {}
attrib['href'] = "404"
# TODO check other mandatory properties !
if attrib:
self._last = ElementTreeModule.Element(tag, attrib)
else:
self._last = ElementTreeModule.Element(tag)
if self._stack:
self._stack[-1].append(self._last)
else:
assert tag == 'zim-tree', 'root element needs to be "zim-tree"'
self._stack.append(self._last)
self._tail = False
return self._last
def end(self, tag):
if tag == '_ignore_':
return None
elif tag in ('p', 'pre'):
self._flush(need_eol=1)
else:
self._flush()
#~ print('END', tag)
self._last = self._stack[-1]
assert self._last.tag == tag, \
"end tag mismatch (expected %s, got %s)" % (self._last.tag, tag)
self._tail = True
if len(self._stack) > 1 and not (
tag in (IMAGE, OBJECT, HEADDATA, TABLEDATA)
or (self._last.text and not self._last.text.isspace())
or bool(list(self._last))
):
# purge empty tags
if self._last.text and self._last.text.isspace():
self._append_to_previous(self._last.text)
empty = self._stack.pop()
self._stack[-1].remove(empty)
children = list(self._stack[-1])
if children:
self._last = children[-1]
if self._last.tail is not None:
self._data = [self._last.tail]
self._last.tail = None
else:
self._last = self._stack[-1]
self._tail = False
if self._last.text is not None:
self._data = [self._last.text]
self._last.text = None
return empty
else:
return self._stack.pop()
def data(self, text):
assert isinstance(text, str)
self._data.append(text)
def append(self, tag, text):
self.start(tag)
self.data(text)
self.end(tag)
def _flush(self, need_eol=0):
# need_eol makes sure previous data ends with \n
#~ print('DATA:', self._data)
text = ''.join(self._data)
self._data = []
# Fix trailing newlines
if text:
m = count_eol_re.search(text)
if m:
seen = len(m.group(0))
if seen == len(text):
self._seen_eol += seen
else:
self._seen_eol = seen
else:
self._seen_eol = 0
if need_eol > self._seen_eol:
text += '\n' * (need_eol - self._seen_eol)
self._seen_eol = need_eol
# Fix prefix newlines
if self._tail and self._last.tag in ('h', 'p') \
and not text.startswith('\n'):
if text:
text = '\n' + text
else:
text = '\n'
self._seen_eol = 1
elif self._tail and self._last.tag == 'li' \
and text.startswith('\n'):
text = text[1:]
if not text.strip('\n'):
self._seen_eol -= 1
if text:
assert self._last is not None, 'data seen before root element'
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
def close(self):
assert len(self._stack) == 0, 'missing end tags'
assert self._last is not None and self._last.tag == 'zim-tree', 'missing root element'
return self._last
def _append_to_previous(self, text):
'''Add text before current element'''
parent = self._stack[-2]
children = list(parent)[:-1]
if children:
if children[-1].tail:
children[-1].tail = children[-1].tail + text
else:
children[-1].tail = text
else:
if parent.text:
parent.text = parent.text + text
else:
parent.text = text
class ParserClass(object):
'''Base class for parsers
Each format that can be used natively should define a class
'Parser' which inherits from this base class.
'''
def parse(self, input):
'''ABSTRACT METHOD: needs to be overloaded by sub-classes.
This method takes a text or an iterable with lines and returns
a ParseTree object.
'''
raise NotImplementedError
@classmethod
def parse_image_url(self, url):
'''Parse urls style options for images like "foo.png?width=500" and
returns a dict with the options. The base url will be in the dict
as 'src'.
'''
i = url.find('?')
if i > 0:
attrib = {'src': url[:i]}
for option in url[i + 1:].split('&'):
if option.find('=') == -1:
logger.warning('Malformed options in "%s"', url)
break
k, v = option.split('=', 1)
if k in ('id', 'width', 'height', 'type', 'href'):
if len(v) > 0:
value = url_decode(v, mode=URL_ENCODE_DATA)
attrib[str(k)] = value # str to avoid unicode key
else:
logger.warning('Unknown attribute "%s" in "%s"', k, url)
return attrib
else:
return {'src': url}
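# Sketch (not part of the original file) demonstrating the option parsing
# above; it assumes this module is importable so url_decode is available.
def _demo_parse_image_url():
    attrib = ParserClass.parse_image_url('foo.png?width=500&height=300')
    assert attrib == {'src': 'foo.png', 'width': '500', 'height': '300'}
    # A URL without options simply yields the source
    assert ParserClass.parse_image_url('foo.png') == {'src': 'foo.png'}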
import collections
DumperContextElement = collections.namedtuple('DumperContextElement', ('tag', 'attrib', 'text'))
# FIXME unify this class with a generic Element class (?)
class DumperClass(Visitor):
'''Base class for dumper classes. Dumper classes serialize the content
of a parse tree back to a text representation of the page content.
Therefore this class implements the visitor API, so it can be
used with any parse tree implementation or parser object that supports
this API.
To implement a dumper class, you need to define handlers for all
tags that can appear in a page. Tags that are represented by a simple
prefix and postfix string can be defined in the dictionary C{TAGS}.
For example to define the italic tag in html output the dictionary
should contain a definition like: C{EMPHASIS: ('<i>', '</i>')}.
For tags that require more complex logic you can define a method to
format the tag. Typical usage is to format link attributes in such
a method. The method name should be C{dump_} + the name of the tag,
e.g. C{dump_link()} for links (see the constants with tag names for
the other tags). Such a dump method will get 3 arguments: the tag
name itself, a dictionary with the tag attributes and a list of
strings that form the tag content. The method should return a list
of strings that represents the formatted text.
This base class takes care of a stack of nested formatting tags and
when a tag is closed either picks the appropriate prefix and postfix
from C{TAGS} or calls the corresponding C{dump_} method. As a result
tags are serialized depth-first.
@ivar linker: the (optional) L{Linker} object, used to resolve links
@ivar template_options: a L{ConfigDict} with options that may be set
in a template (so inherently not safe !) to control the output style.
Formats using this need to define the supported keys in the dict
C{TEMPLATE_OPTIONS}.
@ivar context: the stack of open tags maintained by this class. Can
be used in C{dump_} methods to inspect the parent scope of the
format. Elements on this stack have "tag", "attrib" and "text"
attributes. Keep in mind that the parent scope is not yet complete
when a tag is serialized.
(A short illustrative sketch follows this class definition.)
'''
TAGS = {} #: dict mapping formatting tags to 2-tuples of a prefix and a postfix string
TEMPLATE_OPTIONS = {} #: dict mapping ConfigDefinitions for template options
def __init__(self, linker=None, template_options=None):
self.linker = linker
self.template_options = ConfigDict(template_options)
self.template_options.define(self.TEMPLATE_OPTIONS)
self.context = []
self._text = []
def dump(self, tree):
'''Format a parsetree to text
@param tree: a parse tree object that supports a C{visit()} method
@returns: a list of lines
'''
# FIXME - issue here is that we need to reset state - should be in __init__
self._text = []
self.context = [DumperContextElement(None, None, self._text)]
tree.visit(self)
if len(self.context) != 1:
raise AssertionError('Unclosed tags on tree: %s' % self.context[-1].tag)
#~ import pprint; pprint.pprint(self._text)
return self.get_lines() # FIXME - maybe just return text ?
def get_lines(self):
'''Return the dumped content as a list of lines
Should only be called after closing the top level element
'''
return ''.join(self._text).splitlines(1)
def start(self, tag, attrib=None):
if attrib:
attrib = attrib.copy() # Ensure dumping does not change tree
self.context.append(DumperContextElement(tag, attrib, []))
def text(self, text):
assert text is not None
if self.context[-1].tag != OBJECT:
text = self.encode_text(self.context[-1].tag, text)
self.context[-1].text.append(text)
def end(self, tag):
if not tag or tag != self.context[-1].tag:
raise AssertionError('Unexpected tag closed: %s' % tag)
_, attrib, strings = self.context.pop()
if tag in self.TAGS:
assert strings, 'Can not append empty %s element' % tag
start, end = self.TAGS[tag]
strings.insert(0, start)
strings.append(end)
elif tag == FORMATTEDTEXT:
pass
else:
try:
method = getattr(self, 'dump_' + tag)
except AttributeError:
raise AssertionError('BUG: Unknown tag: %s' % tag)
strings = method(tag, attrib, strings)
#~ try:
#~ u''.join(strings)
#~ except:
#~ print("BUG: %s returned %s" % ('dump_'+tag, strings))
if strings is not None:
self.context[-1].text.extend(strings)
def append(self, tag, attrib=None, text=None):
strings = None
if tag in self.TAGS:
assert text is not None, 'Can not append empty %s element' % tag
start, end = self.TAGS[tag]
text = self.encode_text(tag, text)
strings = [start, text, end]
elif tag == FORMATTEDTEXT:
if text is not None:
strings = [self.encode_text(tag, text)]
else:
if attrib:
attrib = attrib.copy() # Ensure dumping does not change tree
try:
method = getattr(self, 'dump_' + tag)
except AttributeError:
raise AssertionError('BUG: Unknown tag: %s' % tag)
if text is None:
strings = method(tag, attrib, [])
elif tag == OBJECT:
strings = method(tag, attrib, [text])
else:
strings = method(tag, attrib, [self.encode_text(tag, text)])
if strings is not None:
self.context[-1].text.extend(strings)
def encode_text(self, tag, text):
'''Optional method to encode text elements in the output
@note: Do not apply text encoding in the C{dump_} methods, the
list of strings given there may contain prefix and postfix
formatting of nested tags.
@param tag: formatting tag
@param text: text to be encoded
@returns: encoded text
@implementation: optional, default just returns unmodified input
'''
return text
def prefix_lines(self, prefix, strings):
'''Convenience method to wrap a number of lines with e.g. an
indenting sequence.
@param prefix: a string to prefix each line
@param strings: a list of pieces of text
@returns: a new list of lines, each starting with prefix
'''
lines = ''.join(strings).splitlines(1)
return [prefix + l for l in lines]
def dump_object(self, tag, attrib, strings=[]):
'''Dumps objects defined by L{InsertedObjectType}'''
format = str(self.__class__.__module__).split('.')[-1]
try:
obj = PluginManager.insertedobjects[attrib['type']]
except KeyError:
pass
else:
try:
output = obj.format(format, self, attrib, ''.join(strings))
except ValueError:
pass
else:
assert isinstance(output, (list, tuple)), "Invalid output: %r" % output
return output
if attrib['type'].startswith('image+'):
# Fallback for backward compatibility of image generators < zim 0.70
attrib = attrib.copy()
attrib['type'] = attrib['type'][6:]
return self.dump_img(IMAGE, attrib, None)
else:
return self.dump_object_fallback(tag, attrib, strings)
def dump_object_fallback(self, tag, attrib, strings=None):
'''Method to serialize objects that do not have their own
handler for this format.
@implementation: must be implemented in sub-classes
'''
raise NotImplementedError
def isrtl(self, text):
'''Check for Right To Left script
@param text: the text to check
@returns: C{True} if C{text} starts with characters in a
RTL script, or C{None} if direction is not determined.
'''
if Pango is None:
return None
# It seems the find_base_dir() function is not documented in the
# python language bindings. The Gtk C code shows the signature:
#
# Pango.find_base_dir(text, length)
#
# It either returns a direction, or NEUTRAL if e.g. text only
# contains punctuation but no real characters.
dir = Pango.find_base_dir(text, len(text))
if dir == Pango.Direction.NEUTRAL:
return None
else:
return dir == Pango.Direction.RTL
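# A minimal sketch (not part of zim) of a DumperClass subclass, following
# the recipe in the class docstring: simple tags go in TAGS, tags needing
# logic get a dump_<tag> method. EMPHASIS and STRONG are assumed to be the
# tag-name constants defined at module level.
class _DemoHtmlDumper(DumperClass):
    TAGS = {
        EMPHASIS: ('<i>', '</i>'),
        STRONG: ('<b>', '</b>'),
    }

    def dump_link(self, tag, attrib, strings):
        # "strings" holds the already serialized content of the tag
        href = attrib.get('href', '') if attrib else ''
        return ['<a href="%s">' % href] + strings + ['</a>']

    def dump_object_fallback(self, tag, attrib, strings=None):
        return strings or []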
class BaseLinker(object):
'''Base class for linker objects
Linker object translate links in zim pages to (relative) URLs.
This is used when exporting data to resolve links.
Relative URLs start with "./" or "../" and should be interpreted
in the same way as in HTML. Both URLs and relative URLs are
already URL encoded.
'''
def link(self, link):
'''Returns an url for a link in a zim page
This method is used to translate links of any type.
@param link: link to be translated
@returns: url, uri, or relative path
context of this linker
@implementation: must be implemented by child classes
'''
raise NotImplementedError
def img(self, src):
'''Returns an url for image file 'src'
@implementation: must be implemented by child classes
'''
raise NotImplementedError
#~ def icon(self, name):
#~ '''Returns an url for an icon
#~ @implementation: must be implemented by child classes
#~ '''
#~ raise NotImplementedError
def resource(self, path):
'''Return an url for template resources
@implementation: must be implemented by child classes
'''
raise NotImplementedError
def resolve_source_file(self, link):
'''Find the source file for an attachment
Used e.g. by the latex format to find files for equations to
be inlined. Do not use this method to resolve links, the file
given here might be temporary and is not guaranteed to be
available after the export.
@returns: a L{File} object or C{None} if no file was found
@implementation: must be implemented by child classes
'''
raise NotImplementedError
def page_object(self, path):
'''Turn a L{Path} object in a relative link or URI'''
raise NotImplementedError
def file_object(self, file):
'''Turn a L{File} object in a relative link or URI
@implementation: must be implemented by child classes
'''
raise NotImplementedError
class StubLinker(BaseLinker):
'''Linker used for testing - just gives back the link as it was
parsed. DO NOT USE outside of testing.
'''
def __init__(self, source_dir=None):
self.source_dir = source_dir
def link(self, link):
type = link_type(link)
if type == 'mailto' and not link.startswith('mailto:'):
return 'mailto:' + link
elif type == 'interwiki':
return 'interwiki:' + link
else:
return link
def img(self, src):
return src
#~ def icon(self, name):
#~ return 'icon:' + name
def resource(self, path):
return path
def resolve_source_file(self, link):
if self.source_dir:
return self.source_dir.file(link)
else:
return None
def page_object(self, path):
return path.name
def file_object(self, file):
return file.name
class Node(list):
'''Base class for DOM-like access to the document structure.
@note: This class is not optimized for keeping large structures
in memory.
@ivar tag: tag name
@ivar attrib: dict with attributes
'''
__slots__ = ('tag', 'attrib')
def __init__(self, tag, attrib=None, *content):
self.tag = tag
self.attrib = attrib
if content:
self.extend(content)
@classmethod
def new_from_etree(klass, elt):
obj = klass(elt.tag, dict(elt.attrib))
if elt.text:
obj.append(elt.text)
for child in elt:
subnode = klass.new_from_etree(child) # recurs
obj.append(subnode)
if child.tail:
obj.append(child.tail)
return obj
def get(self, key, default=None):
if self.attrib:
return self.attrib.get(key, default)
else:
return default
def set(self, key, value):
if not self.attrib:
self.attrib = {}
self.attrib[key] = value
def append(self, item):
if isinstance(item, DocumentFragment):
list.extend(self, item)
else:
list.append(self, item)
def gettext(self):
'''Get text as string
Ignores any markup and attributes and simply returns textual
content.
@note: do _not_ use as replacement for exporting to plain text
@returns: string
'''
strings = self._gettext()
return ''.join(strings)
def _gettext(self):
strings = []
for item in self:
if isinstance(item, str):
strings.append(item)
else:
strings.extend(item._gettext())
return strings
def toxml(self):
strings = self._toxml()
return ''.join(strings)
def _toxml(self):
strings = []
if self.attrib:
strings.append('<%s' % self.tag)
for key in sorted(self.attrib):
strings.append(' %s="%s"' % (key, encode_xml(str(self.attrib[key]))))
strings.append('>')
else:
strings.append("<%s>" % self.tag)
for item in self:
if isinstance(item, str):
strings.append(encode_xml(item))
else:
strings.extend(item._toxml())
strings.append("</%s>" % self.tag)
return strings
__repr__ = toxml
def visit(self, visitor):
if len(self) == 1 and isinstance(self[0], str):
visitor.append(self.tag, self.attrib, self[0])
else:
visitor.start(self.tag, self.attrib)
for item in self:
if isinstance(item, str):
visitor.text(item)
else:
item.visit(visitor)
visitor.end(self.tag)
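# Illustrative sketch: Node objects mix plain strings and child nodes in
# one list, and toxml() serializes them depth-first.
def _demo_node_toxml():
    node = Node('p', None, 'hello ', Node('strong', None, 'world'))
    assert node.toxml() == '<p>hello <strong>world</strong></p>'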
class Element(Node):
'''Element class for DOM-like access'''
pass
class DocumentFragment(Node):
'''Document fragment class for DOM-like access'''
def __init__(self, *content):
self.tag = FRAGMENT
self.attrib = None
if content:
self.extend(content)
class TableParser():
'''Common functions for converting a table from its XML structure to another format'''
@staticmethod
def width2dim(lines):
'''
Calculates the characters on each column and return list of widths
:param lines: 2-dim multiline rows
:return: the number of characters of the longest cell-value by column
'''
widths = [max(list(map(len, line))) for line in zip(*lines)]
return widths
@staticmethod
def width3dim(lines):
'''
Calculates the characters on each column and return list of widths
:param lines: 3-dim multiline rows
:return: the number of characters of the longest cell-value by column
'''
lines = reduce(lambda x, y: x + y, lines)
widths = [max(list(map(len, line))) for line in zip(*lines)]
return widths
@staticmethod
def convert_to_multiline_cells(rows):
'''
Splits each cell in a list of rows on "\n" and returns a 3-dimensional list:
each inner tuple represents one line, multiple lines make up a row, and the
rows together make up the table.
c11a = cell in row 1, column 1, first line
:param rows: format like (('c11a \n c11b', 'c12a \n c12b'), ('c21', 'c22a \n 22b'))
:return: format like (((c11a, c12a), (c11b, c12b)), ((c21, c22a), ('', c22b)))
'''
multi_rows = [[cell.split("\n") for cell in row] for row in rows]
# grouping by line, not by row
strings = [list(map(lambda *line: [val if val is not None else '' for val in line], *row)) for row in multi_rows]
return strings
@staticmethod
def get_options(attrib):
'''
Lists the attributes as tuple
:param attrib:
:return: tuple of attributes
'''
aligns = attrib['aligns'].split(',')
wraps = list(map(int, attrib['wraps'].split(',')))
return aligns, wraps
@staticmethod
def rowsep(maxwidths, x='+', y='-'):
'''
Displays a row separator
example: rowsep((3, 0)) -> '+-----+--+'
:param maxwidths: list of column lengths
:param x: point-separator
:param y: line-separator
:return: a textline
'''
return x + x.join([(width + 2) * y for width in maxwidths]) + x
@staticmethod
def headsep(maxwidths, aligns, x='|', y='-'):
'''
Displays a header separation with alignment infos
example: headsep((3, 0), ('left', 'center')) -> '|:----|::|'
:param maxwidths: list of column lengths
:param aligns: list of alignments
:param x: point-separator
:param y: line-separator
:return: a text line
'''
cells = []
for width, align in zip(maxwidths, aligns):
line = width * y
if align == 'left':
cell = ':' + line + y
elif align == 'right':
cell = y + line + ':'
elif align == 'center':
cell = ':' + line + ':'
else:
cell = y + line + y
cells.append(cell)
return x + x.join(cells) + x
@staticmethod
def headline(row, maxwidths, aligns, wraps, x='|', y=' '):
'''
Displays a header line in text format
:param row: tuple of cells
:param maxwidths: list of column lengths
:param aligns: list of alignments
:param wraps: list of wrap flags; 1 marks a wrapped column by replacing its last character with '<'
:param x: point-separator
:param y: space-separator
:return: a textline
'''
row = TableParser.alignrow(row, maxwidths, aligns, y)
cells = []
for val, wrap in zip(row, wraps):
if wrap == 1:
val = val[:-1] + '<'
cells.append(val)
return x + x.join(cells) + x
@staticmethod
def rowline(row, maxwidths, aligns, x='|', y=' '):
'''
Displays a normal column line in text format
example: rowline(('aa', 'b'), (3, 1), ('left', 'left')) -> '| aa  | b |'
:param row: tuple of cells
:param maxwidths: list of column lengths
:param aligns: list of alignments
:param x: point-separator
:param y: space-separator
:return: a textline
'''
cells = TableParser.alignrow(row, maxwidths, aligns, y)
return x + x.join(cells) + x
@staticmethod
def alignrow(row, maxwidths, aligns, y=' '):
'''
Formats a row with the right alignments
:param row: tuple of cells
:param maxwidths: list of column lengths
:param aligns: list of alignments
:param y: space-separator
:return: a textline
'''
cells = []
for val, align, maxwidth in zip(row, aligns, maxwidths):
if align == 'left':
(lspace, rspace) = (1, maxwidth - len(val) + 1)
elif align == 'right':
(lspace, rspace) = (maxwidth - len(val) + 1, 1)
elif align == 'center':
lspace = (maxwidth - len(val)) // 2 + 1
rspace = (maxwidth - lspace - len(val) + 2)
else:
(lspace, rspace) = (1, maxwidth - len(val) + 1)
cells.append(lspace * y + val + rspace * y)
return cells
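# Sketch demonstrating the static helpers above on a small two-column table:
def _demo_table_render():
    widths = TableParser.width2dim([['aa', 'b'], ['c', 'dddd']])  # -> [2, 4]
    assert TableParser.rowsep(widths) == '+----+------+'
    assert TableParser.rowline(['aa', 'b'], widths, ['left', 'left']) \
        == '| aa | b    |'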
from zim.config.dicts import DefinitionOrderedDict
_is_header_re = re.compile(r'^([\w\-]+):\s+(.*?)\n', re.M)
_is_continue_re = re.compile(r'^([^\S\n]+)(.+?)\n', re.M)
def parse_header_lines(text):
'''Read header lines in the rfc822 format.
Can e.g. look like::
Content-Type: text/x-zim-wiki
Wiki-Format: zim 0.4
Creation-Date: 2010-12-14T14:15:09.134955
@returns: the text minus the headers and a dict with the headers
'''
assert isinstance(text, str)
meta = DefinitionOrderedDict()
match = _is_header_re.match(text)
pos = 0
while match:
header = match.group(1)
value = match.group(2)
pos = match.end()
meta[header] = value.strip()
match = _is_continue_re.match(text, pos)
while match:
cont = match.group(2)
meta[header] += '\n' + cont.strip()
pos = match.end()
match = _is_continue_re.match(text, pos)
match = _is_header_re.match(text, pos)
else:
if pos > 0:
try:
if text[pos] == '\n':
pos += 1
except IndexError:
pass
text = text[pos:]
return text, meta
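# Sketch of the round trip with dump_header_lines() defined below:
def _demo_header_roundtrip():
    text = 'Content-Type: text/x-zim-wiki\nWiki-Format: zim 0.4\n\nbody\n'
    body, meta = parse_header_lines(text)
    assert body == 'body\n'
    assert meta['Wiki-Format'] == 'zim 0.4'
    assert dump_header_lines(meta) == \
        'Content-Type: text/x-zim-wiki\nWiki-Format: zim 0.4\n'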
def dump_header_lines(*headers):
'''Return text representation of header dict'''
text = []
append = lambda k, v: text.extend((k, ': ', v.strip().replace('\n', '\n\t'), '\n'))
for h in headers:
if hasattr(h, 'items'):
for k, v in list(h.items()):
append(k, v)
else:
for k, v in h:
append(k, v)
return ''.join(text)
|
jaap-karssenberg/zim-desktop-wiki
|
zim/formats/__init__.py
|
Python
|
gpl-2.0
| 50,661
|
# iSCSI configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from IPy import IP
from collections import namedtuple
from gi.repository import GLib
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.ui.gui import GUIObject
from pyanaconda import nm
__all__ = ["ISCSIDialog"]
STYLE_NONE = 0
STYLE_CHAP = 1
STYLE_REVERSE_CHAP = 2
Credentials = namedtuple("Credentials", ["style",
"targetIP", "initiator", "username",
"password", "rUsername", "rPassword"])
NodeStoreRow = namedtuple("NodeStoreRow", ["selected", "notLoggedIn", "name", "iface"])
def discover_no_credentials(builder):
return Credentials(STYLE_NONE,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
"", "", "", "")
def discover_chap(builder):
return Credentials(STYLE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("chapUsernameEntry").get_text(),
builder.get_object("chapPasswordEntry").get_text(),
"", "")
def discover_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("rchapUsernameEntry").get_text(),
builder.get_object("rchapPasswordEntry").get_text(),
builder.get_object("rchapReverseUsername").get_text(),
builder.get_object("rchapReversePassword").get_text())
# This list maps the current page from the authNotebook to a function to grab
# credentials out of the UI. This works as long as authNotebook keeps the
# filler page at the front.
discoverMap = [discover_no_credentials, discover_chap, discover_reverse_chap]
def login_no_credentials(builder):
return Credentials(STYLE_NONE,
"", "",
"", "", "", "")
def login_chap(builder):
return Credentials(STYLE_CHAP,
"", "",
builder.get_object("loginChapUsernameEntry").get_text(),
builder.get_object("loginChapPasswordEntry").get_text(),
"", "")
def login_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
"", "",
builder.get_object("loginRchapUsernameEntry").get_text(),
builder.get_object("loginRchapPasswordEntry").get_text(),
builder.get_object("loginRchapReverseUsername").get_text(),
builder.get_object("loginRchapReversePassword").get_text())
# And this list maps the current page from the loginAuthNotebook to a function
# to grab credentials out of the UI. This works as long as loginAuthNotebook
# keeps the filler page at the front, and we check to make sure "Use the
# credentials from discovery" is not selected first.
loginMap = [login_no_credentials, login_chap, login_reverse_chap]
def credentials_valid(credentials):
if credentials.style == STYLE_NONE:
return True
elif credentials.style == STYLE_CHAP:
return credentials.username.strip() != "" and credentials.password != ""
elif credentials.style == STYLE_REVERSE_CHAP:
return credentials.username.strip() != "" and credentials.password != "" and \
credentials.rUsername.strip() != "" and credentials.rPassword != ""
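# Sketch: how the rules above play out for CHAP credentials (field names
# follow the Credentials namedtuple defined earlier in this file).
def _demo_credentials_valid():
    good = Credentials(STYLE_CHAP, "10.0.0.1", "iqn.2013-01.com.example:test",
                       "user", "secret", "", "")
    assert credentials_valid(good)
    assert not credentials_valid(good._replace(password=""))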
class ISCSIDialog(GUIObject):
builderObjects = ["iscsiDialog", "nodeStore", "nodeStoreFiltered"]
mainWidgetName = "iscsiDialog"
uiFile = "spokes/advstorage/iscsi.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.iscsi = self.storage.iscsi()
self._discoveryError = None
self._loginError = False
self._discoveredNodes = []
self._update_devicetree = False
self._authTypeCombo = self.builder.get_object("authTypeCombo")
self._authNotebook = self.builder.get_object("authNotebook")
self._iscsiNotebook = self.builder.get_object("iscsiNotebook")
self._loginButton = self.builder.get_object("loginButton")
self._loginAuthTypeCombo = self.builder.get_object("loginAuthTypeCombo")
self._loginAuthNotebook = self.builder.get_object("loginAuthNotebook")
self._loginGrid = self.builder.get_object("loginGrid")
self._loginConditionNotebook = self.builder.get_object("loginConditionNotebook")
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._bindCheckbox = self.builder.get_object("bindCheckbutton")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._initiatorEntry = self.builder.get_object("initiatorEntry")
self._store = self.builder.get_object("nodeStore")
def refresh(self):
self._bindCheckbox.set_active(bool(self.iscsi.ifaces))
self._bindCheckbox.set_sensitive(self.iscsi.mode == "none")
self._authTypeCombo.set_active(0)
self._startButton.set_sensitive(True)
self._loginAuthTypeCombo.set_active(0)
self.builder.get_object("nodeStoreFiltered").set_visible_column(1)
self._initiatorEntry.set_text(self.iscsi.initiator)
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
@property
def selectedNames(self):
return [itr[2] for itr in self._store if itr[0]]
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
self.storage.devicetree.populate()
return rc
##
## DISCOVERY
##
def on_auth_type_changed(self, widget, *args):
self._authNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Start button sensitivity.
self.on_discover_field_changed()
def _discover(self, credentials, bind):
# This needs to be in its own thread, not marked with gtk_action_* because it's
# called from on_start_clicked, which is in the GTK main loop. Those decorators
# won't do anything special in that case.
if not self.iscsi.initiatorSet:
self.iscsi.initiator = credentials.initiator
# interfaces created here affect nodes that iscsi.discover would return
if self.iscsi.mode == "none" and not bind:
self.iscsi.delete_interfaces()
elif (self.iscsi.mode == "bind"
or self.iscsi.mode == "none" and bind):
activated = set(nm.nm_activated_devices())
created = set(self.iscsi.ifaces.values())
self.iscsi.create_interfaces(activated - created)
try:
self._discoveredNodes = self.iscsi.discover(credentials.targetIP,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
except IOError as e:
self._discoveryError = str(e)
return
if len(self._discoveredNodes) == 0:
self._discoveryError = "No nodes discovered."
def _check_discover(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_DISCOVER):
return True
# When iscsi discovery is done, update the UI. We don't need to worry
# about the user escaping from the dialog because all the buttons are
# marked insensitive.
spinner = self.builder.get_object("waitSpinner")
spinner.stop()
if self._discoveryError:
# Failure. Display some error message and leave the user on the
# dialog to try again.
self.builder.get_object("discoveryErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
self._set_configure_sensitive(True)
else:
# Success. Now populate the node store and kick the user on over to
# that subscreen.
self._add_nodes(self._discoveredNodes)
self._iscsiNotebook.set_current_page(1)
self._okButton.set_sensitive(True)
# If some form of login credentials were used for discovery,
# default to using the same for login.
if self._authTypeCombo.get_active() != 0:
self._loginAuthTypeCombo.set_active(3)
# We always want to enable this button, in case the user's had enough.
self._cancelButton.set_sensitive(True)
return False
def _set_configure_sensitive(self, sensitivity):
for child in self._configureGrid.get_children():
if child == self._initiatorEntry:
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
elif child == self._bindCheckbox:
self._bindCheckbox.set_sensitive(sensitivity and self.iscsi.mode == "none")
elif child != self._conditionNotebook:
child.set_sensitive(sensitivity)
def on_start_clicked(self, *args):
# First, update some widgets to not be usable while discovery happens.
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
self._set_configure_sensitive(False)
self._initiatorEntry.set_sensitive(False)
# Now get the node discovery credentials.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
discoveredLabel = self.builder.get_object("discoveredLabel")
discoveredLabel.set_markup(discoveredLabel.get_label() % {"initiatorName": credentials.initiator,
"targetAddress": credentials.targetIP})
bind = self._bindCheckbox.get_active()
spinner = self.builder.get_object("waitSpinner")
spinner.start()
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_DISCOVER, target=self._discover,
args=(credentials, bind)))
GLib.timeout_add(250, self._check_discover)
# The Start button should be made sensitive only when the initiator name,
# IP address, and any auth fields hold valid values.
def _target_ip_valid(self):
widget = self.builder.get_object("targetEntry")
text = widget.get_text()
try:
IP(text)
return True
except ValueError:
return False
def _initiator_name_valid(self):
widget = self.builder.get_object("initiatorEntry")
text = widget.get_text()
stripped = text.strip()
return "." in stripped and ":" in stripped
def on_discover_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
sensitive = self._target_ip_valid() and self._initiator_name_valid() and credentials_valid(credentials)
self._startButton.set_sensitive(sensitive)
##
## LOGGING IN
##
def _add_nodes(self, nodes):
for node in nodes:
iface = self.iscsi.ifaces.get(node.iface, node.iface)
self._store.append([False, True, node.name, iface])
# We should select the first node by default.
self._store[0][0] = True
def on_login_type_changed(self, widget, *args):
self._loginAuthNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Log In button sensitivity.
self.on_login_field_changed()
def on_row_toggled(self, button, path):
if not path:
return
# Then, go back and mark just this row as selected.
itr = self._store.get_iter(path)
self._store[itr][0] = not self._store[itr][0]
def _login(self, credentials):
for row in self._store:
obj = NodeStoreRow(*row)
if not obj.selected:
continue
for node in self._discoveredNodes:
if obj.notLoggedIn and node.name == obj.name:
# when binding interfaces match also interface
if self.iscsi.ifaces and \
obj.iface != self.iscsi.ifaces[node.iface]:
continue
(rc, msg) = self.iscsi.log_into_node(node,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
if not rc:
self._loginError = msg
return
self._update_devicetree = True
row[1] = False
def _check_login(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_LOGIN):
return True
spinner = self.builder.get_object("loginSpinner")
spinner.stop()
spinner.hide()
if self._loginError:
self.builder.get_object("loginErrorLabel").set_text(self._loginError)
self._loginError = None
self._loginConditionNotebook.set_current_page(1)
self._cancelButton.set_sensitive(True)
self._loginButton.set_sensitive(True)
else:
anyLeft = False
self._loginConditionNotebook.set_current_page(0)
# Select the now-first target for the user in case they want to
# log into another one.
for row in self._store:
if row[1]:
row[0] = True
anyLeft = True
# And make the login button sensitive if there are any more
# nodes to login to.
self._loginButton.set_sensitive(True)
break
self._okButton.set_sensitive(True)
# Once a node has been logged into, it doesn't make much sense to let
# the user cancel. Cancel what, exactly?
self._cancelButton.set_sensitive(False)
if not anyLeft:
self.window.response(1)
self._set_login_sensitive(True)
return False
def _set_login_sensitive(self, sensitivity):
for child in self._loginGrid.get_children():
if child != self._loginConditionNotebook:
child.set_sensitive(sensitivity)
def on_login_clicked(self, *args):
# Make the buttons insensitive while we work.
self._okButton.set_sensitive(False)
self._cancelButton.set_sensitive(False)
self._loginButton.set_sensitive(False)
self._loginConditionNotebook.set_current_page(0)
self._set_login_sensitive(False)
spinner = self.builder.get_object("loginSpinner")
spinner.start()
spinner.set_visible(True)
spinner.show()
# Are we reusing the credentials from the discovery step? If so, grab them
# out of the UI again here. They should still be there.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_LOGIN, target=self._login,
args=(credentials,)))
GLib.timeout_add(250, self._check_login)
def on_login_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
self._loginButton.set_sensitive(credentials_valid(credentials))
|
Sabayon/anaconda
|
pyanaconda/ui/gui/spokes/advstorage/iscsi.py
|
Python
|
gpl-2.0
| 18,143
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-11-25 13:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qms_core', '0015_auto_20161114_1808'),
]
operations = [
migrations.AlterField(
model_name='geoservice',
name='source',
field=models.TextField(blank=True, null=True, verbose_name='source'),
),
]
|
nextgis/quickmapservices_server
|
qms_server/qms_core/migrations/0016_auto_20161125_1320.py
|
Python
|
gpl-2.0
| 486
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import wx
# Local imports
import eg
class SerialPortChoice(wx.Choice):
"""
A wx.Choice control that shows all available serial ports on the system.
"""
def __init__(
self,
parent,
id=-1,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=0,
validator=wx.DefaultValidator,
name=wx.ChoiceNameStr,
value=None
):
"""
:Parameters:
`value` : int
The initial port to select (0 = COM1:). The first available
port will be selected if the given port does not exist or
no value is given.
"""
ports = eg.SerialThread.GetAllPorts()
self.ports = ports
choices = [("COM%d" % (portnum + 1)) for portnum in ports]
wx.Choice.__init__(
self, parent, id, pos, size, choices, style, validator, name
)
try:
portPos = ports.index(value)
except ValueError:
portPos = 0
self.SetSelection(portPos)
def GetValue(self):
"""
Return the currently selected serial port.
:rtype: int
:returns: The serial port as an integer (0 = COM1:)
"""
try:
port = self.ports[self.GetSelection()]
except IndexError:
port = 0
return port
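# Usage sketch (illustrative; "panel" stands for any existing parent window):
#
#     choice = SerialPortChoice(panel, value=2)  # preselect COM3: if present
#     port = choice.GetValue()                   # 0-based port number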
|
WoLpH/EventGhost
|
eg/Classes/SerialPortChoice.py
|
Python
|
gpl-2.0
| 2,115
|
__author__ = 'dcristian'
import os
import socket
from uuid import getnode as get_mac
import json
import sys
import subprocess
class P:
module_failed = {}
class Constant:
debug_dummy = False
db_values_json = None
db_auto_module_json = None
def __init__(self):
pass
SIGNAL_SENSOR = 'signal-from-sensor'
SIGNAL_UI_DB_POST = 'signal-from-db-post'
SIGNAL_MQTT_RECEIVED = 'signal-from-mqtt-data-received'
# SIGNAL_HEAT = 'signal-from-heat'
SIGNAL_GPIO = 'signal-from-GPIO'
SIGNAL_CAMERA = 'signal-from-CAMERA'
SIGNAL_GPIO_INPUT_PORT_LIST = 'signal-setup-GPIO-input'
SIGNAL_FILE_WATCH = 'signal-from-file-watch'
SIGNAL_DB_CHANGE_FOR_RULES = 'signal_db_change_for_rules'
SIGNAL_UTILITY = 'signal-utility-data'
SIGNAL_UTILITY_EX = 'signal-utility-extra-data'
SIGNAL_PUSH_NOTIFICATION = 'signal-push-notification'
SIGNAL_CHAT_NOTIFICATION = 'signal-chat-notification'
SIGNAL_EMAIL_NOTIFICATION = 'signal-email-notification'
SIGNAL_PRESENCE = 'signal-presence'
SIGNAL_ALARM='signal-alarm'
SIGNAL_GPS = 'signal-gps'
SIGNAL_STORABLE_RECORD = 'signal-storable-record'
SIGNAL_BATTERY_STAT = 'signal-battery-stat'
SIGNAL_USB_DEVICE_CHANGE = 'signal-usb-device-change'
PRESENCE_TYPE_CAM = 'cam'
CONTACT_TYPE_NO = 'contact-no' # contact normally open for ZoneAlarm pin type
ERROR_CONNECT_MAX_RETRY_COUNT = 2
ERROR_CONNECT_PAUSE_SECOND = 1
OS = 'not initialised'
OS_LINUX = {'linux', 'posix'}
OS_WINDOWS = {'windows', 'nt'}
MACHINE_TYPE_BEAGLEBONE = 'beaglebone'
MACHINE_TYPE_RASPBERRY = 'raspberry'
MACHINE_TYPE_ODROID = 'odroid'
MACHINE_TYPE_OPENWRT = 'openwrt'
MACHINE_TYPE_INTEL_LINUX = 'intel-linux'
NOT_INIT = 'not initialised'
HOST_NAME = NOT_INIT
HOST_MAIN_IP = NOT_INIT
HOST_MAC = NOT_INIT
HOST_MACHINE_TYPE = NOT_INIT
HOST_PRIORITY = -1
IS_MACHINE_BEAGLEBONE = False
IS_MACHINE_RASPBERRYPI = False
IS_MACHINE_ODROID = False
IS_MACHINE_INTEL = False
IS_MACHINE_OPENWRT = False
HAS_LOCAL_DB_REPORTING_CAPABILITY = False
MAX_REPORT_LINES = 1000
URL_OPEN_TIMEOUT = 10
@staticmethod
def is_os_windows():
return Constant.OS in Constant.OS_WINDOWS
@staticmethod
def is_os_linux():
return Constant.OS in Constant.OS_LINUX
DB_FIELD_UPDATE = 'updated_on'
SCRIPT_RESPONSE_OK = 'RESULTOK'
SCRIPT_RESPONSE_NOTOK = 'RESULTNOTOK'
P_DB_PATH="DB_PATH"
P_DB_TYPE = "DB_TYPE"
P_MZP_SERVER_URL = 'MZP_SERVER_URL'
P_MQTT_HOST_1 = 'MQTT_HOST_1'
P_MQTT_PORT_1 = 'MQTT_PORT_1'
P_MQTT_TOPIC = 'MQTT_TOPIC'
P_MQTT_TOPIC_MAIN = 'MQTT_TOPIC_MAIN'
P_MQTT_TOPIC_OPENHAB_SEND = 'MQTT_TOPIC_OPENHAB_SEND'
P_MQTT_TOPIC_OPENHAB_RECEIVE = 'MQTT_TOPIC_OPENHAB_RECEIVE'
P_MQTT_TOPIC_OWNTRACKS_RECEIVE = 'MQTT_TOPIC_OWNTRACKS_RECEIVE'
P_MQTT_TOPIC_SONOFF_1 = 'MQTT_TOPIC_SONOFF_1'
P_MQTT_TOPIC_MICRO = 'MQTT_TOPIC_MICRO'
P_MQTT_TOPIC_SHELLY = 'MQTT_TOPIC_SHELLY'
P_MQTT_HOST_2 = 'MQTT_HOST_2'
P_MQTT_PORT_2 = 'MQTT_PORT_2'
P_MQTT_HOST_3 = 'MQTT_HOST_3'
P_MQTT_PORT_3 = 'MQTT_PORT_3'
P_PLOTLY_ALTERNATE_CONFIG = 'P_PLOTLY_ALTERNATE_CONFIG'
P_OWSERVER_HOST_1 = 'OWSERVER_HOST_1'
P_OWSERVER_PORT_1 = 'OWSERVER_PORT_1'
P_DDNS_RACKSPACE_CONFIG_FILE = 'DDNS_RACKSPACE_CONFIG_FILE'
P_USESUDO_DISKTOOLS = 'P_USESUDO_DISKTOOLS'
P_FLASK_WEB_PORT = 'P_FLASK_WEB_PORT'
P_MOTION_VIDEO_PATH = 'P_MOTION_VIDEO_PATH'
P_YOUTUBE_CREDENTIAL_FILE = 'P_YOUTUBE_CREDENTIAL_FILE'
DB_REPORTING_LOCATION_ENABLED = 'DB_REPORTING_LOCATION_ENABLED'
DB_REPORTING_LOCATION = 'DB_REPORTING_LOCATION'
DB_REPORTING_USER = 'DB_REPORTING_USER'
DB_REPORTING_PASS = 'DB_REPORTING_PASS'
P_TEMPERATURE_THRESHOLD = 'P_TEMPERATURE_THRESHOLD'
P_MPD_SERVER = 'P_MPD_SERVER'
P_NEWTIFY_KEY = 'P_NEWTIFY_KEY'
P_HIPCHAT_TOKEN = 'P_HIPCHAT_TOKEN'
P_HIPCHAT_ROOM_API_ID = 'P_HIPCHAT_ROOM_API_ID'
P_MPD_PORT_ZONE = 'P_MPD_PORT_ZONE'
P_LASTFM_CONFIG_FILE = 'LASTFM_CONFIG_FILE'
P_GMUSICPROXY_URL = 'GMUSICPROXY_URL'
P_AMP_SERIAL_HOST = 'AMP_SERIAL_HOST'
P_AMP_SERIAL_PORT = 'AMP_SERIAL_PORT'
P_ALEXA_WEMO_LISTEN_PORT = 'ALEXA_WEMO_LISTEN_PORT'
P_HEAT_SOURCE_MIN_TEMP = 'HEAT_SOURCE_MIN_TEMP'
P_HEAT_STATE_REFRESH_PERIOD = 'HEAT_STATE_REFRESH_PERIOD'
P_MAX_DELTA_TEMP_KEEP_WARM = 'MAX_DELTA_TEMP_KEEP_WARM'
P_GMAIL_NOTIFY_FROM_EMAIL = 'GMAIL_NOTIFY_FROM_EMAIL'
P_GMAIL_CREDENTIAL_FILE = 'GMAIL_CREDENTIAL_FILE'
P_ALL_CREDENTIAL_FILE = 'ALL_CREDENTIAL_FILE'
P_THINGSPEAK_API_FILE = 'THINGSPEAK_API_FILE'
P_NOTIFY_EMAIL_RECIPIENT = 'NOTIFY_EMAIL_RECIPIENT'
P_SOLAR_APS_LOCAL_URL = 'SOLAR_APS_LOCAL_URL'
P_SOLAR_APS_LOCAL_REALTIME_URL = 'SOLAR_APS_LOCAL_REALTIME_URL'
P_SOLAR_UTILITY_NAME = 'SOLAR_UTILITY_NAME'
P_ATREA_LOCAL_URL = "ATREA_LOCAL_URL"
P_PERF_FILE_PATH = 'PERF_FILE_PATH'
P_NEXTCLOUD_PHONETRACK_URL = 'NEXTCLOUD_PHONETRACK_URL'
SMARTCTL_MODEL_FAMILY = 'Model Family:'
SMARTCTL_MODEL_DEVICE = 'Device Model:'
SMARTCTL_SERIAL_NUMBER = 'Serial Number:'
SMARTCTL_TEMP_ID = '194 Temperature_Celsius'
SMARTCTL_STATUS = 'SMART overall-health self-assessment test result:'
SMARTCTL_ERROR_SECTORS = '198 Offline_Uncorrectable'
SMARTCTL_START_STOP_COUNT = '4 Start_Stop_Count'
SMARTCTL_LOAD_CYCLE_COUNT = '193 Load_Cycle_Count'
SMARTCTL_ERROR_NO_DISK = 'Unable to detect device type'
SMARTCTL_DEVICE_IN_STANDBY = 'Device is in STANDBY mode'
HDPARM_STATUS = 'drive state is:'
FREE_MEM_STATUS = 'Mem:'
DISK_DEV_MAIN = '/dev/sd'
JSON_MESSAGE_TYPE = 'message_type'
JSON_PUBLISH_DATE = 'datetime_'
JSON_PUBLISH_TABLE = 'table_'
# JSON_PUBLISH_RECORD_OPERATION='operation_'
JSON_PUBLISH_OPERATION_UPDATE = 'update'
JSON_PUBLISH_SOURCE_HOST = 'source_host_'
JSON_PUBLISH_SRC_HOST = 'source_host'
JSON_PUBLISH_TARGET_HOST = 'target_host_'
JSON_PUBLISH_VALUE_TARGET_HOST_ALL = '*'
JSON_PUBLISH_GRAPH_X = 'graph_x_'
JSON_PUBLISH_GRAPH_Y = 'graph_y_'
JSON_PUBLISH_GRAPH_SHAPE = 'graph_shape_'
JSON_PUBLISH_GRAPH_ID = 'graph_id_'
JSON_PUBLISH_GRAPH_LEGEND = 'graph_legend_'
# use exact field names from class BaseGraph
JSON_PUBLISH_SAVE_TO_GRAPH = 'save_to_graph'
JSON_PUBLISH_SAVE_TO_HISTORY = 'save_to_history'
JSON_PUBLISH_FIELDS_CHANGED = 'last_commit_field_changed_list'
JSON_PUBLISH_NOTIFY_TRANSPORT = 'notify_transport_enabled'
JSON_PUBLISH_NOTIFY_DB_COMMIT = 'notified_on_db_commit'
# use exact field name from class DbBase
JSON_PUBLISH_RECORD_UUID = 'record_uuid'
GPIO_PIN_TYPE_BBB = 'bbb'
GPIO_PIN_TYPE_PI_STDGPIO = 'gpio'
GPIO_PIN_TYPE_PI_FACE_SPI = 'piface'
GPIO_PIN_TYPE_PI_PCF8574 = 'pcf8574'
GPIO_PIN_TYPE_ZWAVE = 'zwave'
GPIO_PIN_TYPE_SONOFF = 'sonoff'
GPIO_PIN_DIRECTION_IN = 'in'
GPIO_PIN_DIRECTION_OUT = 'out'
UTILITY_TYPE_ELECTRICITY='electricity'
#UTILITY_TYPE_ELECTRICITY_MEASURE = 'kWh'
#UTILITY_TYPE_ELECTRICITY_MEASURE_2 = 'watt'
UTILITY_TYPE_WATER = 'water'
#UTILITY_TYPE_WATER_MEASURE = 'l'
UTILITY_TYPE_GAS = 'gas'
#UTILITY_TYPE_GAS_MEASURE = 'l'
UTILITY_TYPE_WATER_LEVEL = 'water level'
IO_SENSOR_PURPOSE = 'utility'
LOG_SENSOR_INACTIVE = ''
DB_REPORTING_ID = 'reporting'
def _install(package):
print('Installing missing module {}'.format(package))
return subprocess.call([sys.executable, "-m", "pip", "install", package])
def _install_apt(apt_list):
for apt in apt_list:
print('Installing missing apt {}'.format(apt))
subprocess.call(["sudo", "apt", "install", "-y", apt])
def fix_module(ex):
try:
error_message = '{}'.format(ex)
mod_err = None
if 'No module named ' in error_message:
ar = error_message.split('No module named ')
if len(ar) == 2:
mod_err = ar[1]
else:
msg = 'Unable to split {}, unexpected len {}'.format(error_message, len(ar))
print(msg)
elif 'cannot import name' in error_message:
key = "from '"
start = error_message.find(key)
start = start + len(key)
end = error_message.find("'", start)
mod_err = error_message[start:end]
if mod_err is not None:
mod_name = mod_err.replace("'", "")
package_names, apt_names = get_package_name(mod_name)
if package_names is None:
package_names = [mod_name]
if apt_names is not None:
_install_apt(apt_names)
failed = None
for package_name in package_names:
if package_name not in P.module_failed:
res = _install(package_name)
print('Install package {} returned {}'.format(package_name, res))
if res != 0:
P.module_failed[package_name] = res
failed = True
else:
print('Not retrying a failed package install for {}'.format(package_name))
if failed is None:
return True
else:
return False
except Exception as ex:
msg = 'Fixmodule exception err={}'.format(ex)
print(msg)
return False
def get_app_root_path():
return os.getcwd() + '/'
def load_config_json():
from main.logger_helper import L
try:
var_path = get_app_root_path() + 'scripts/config/default_db_values.json'
L.l.info('Loading variables from config file [{}]'.format(var_path))
with open(var_path, 'r') as f:
Constant.db_values_json = json.load(f)
except Exception as ex:
L.l.error('Cannot load config json, ex={}'.format(ex))
exit(2)
def load_auto_module_json():
from main.logger_helper import L
try:
var_path = get_app_root_path() + 'auto_module.json'
L.l.info('Loading module mapping from config file [{}]'.format(var_path))
with open(var_path, 'r') as f:
Constant.db_auto_module_json = json.load(f)
except Exception as ex:
L.l.error('Cannot load module config json, ex={}'.format(ex))
exit(2)
def get_json_param(name):
""" retrieves parameter value from json config file"""
param_fields = Constant.db_values_json["Parameter"]
value = None
for config_record in param_fields:
if config_record["name"] == name:
value = config_record["value"]
break
return value
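# Sketch (assuming the config JSON layout used above: a top-level
# "Parameter" list of {"name": ..., "value": ...} records):
def _demo_get_json_param():
    Constant.db_values_json = {
        "Parameter": [{"name": Constant.P_FLASK_WEB_PORT, "value": "8080"}]}
    assert get_json_param(Constant.P_FLASK_WEB_PORT) == "8080"
    assert get_json_param("missing") is None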
def get_package_name(module_name):
""" retrieves parameter value from json config file"""
param_fields = Constant.db_auto_module_json["Package"]
values = None
apts = None
for config_record in param_fields:
if config_record["module"] == module_name:
value = config_record["package"]
if ',' in value:
values = value.split(',')
else:
values = [value]
if "apt" in config_record.keys():
apt = config_record["apt"]
if ',' in apt:
apts = apt.split(',')
else:
apts = [apt]
break
return values, apts
def get_table(table_name):
if table_name in Constant.db_values_json:
return Constant.db_values_json[table_name]
else:
return None
def init():
Constant.OS = os.name
Constant.HOST_NAME = socket.gethostname()
load_config_json()
load_auto_module_json()
from main.logger_helper import L
try:
mac = get_mac()
# call it twice as get_mac might fake mac: http://stackoverflow.com/questions/159137/getting-mac-address
if mac == get_mac():
Constant.HOST_MAC = ':'.join(("%012X" % mac)[i:i + 2] for i in range(0, 12, 2))
else:
L.l.warning('Cannot get mac address')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com", 80))
Constant.HOST_MAIN_IP = s.getsockname()[0]
s.close()
except Exception as ex:
L.l.warning('Cannot obtain main IP accurately, not connected to Internet?, retry, ex={}'.format(ex))
try:
Constant.HOST_MAIN_IP = socket.gethostbyname(socket.gethostname())
except Exception as ex2:
L.l.warning('Cannot obtain main IP, no DNS available?, ex={}'.format(ex2))
Constant.HOST_MAIN_IP = '127.0.0.1'
L.l.info('Running on OS={} HOST={} IP={} MACHINE={}'.format(
Constant.OS, Constant.HOST_NAME, Constant.HOST_MAIN_IP, Constant.HOST_MACHINE_TYPE))
def get_secure_general(token):
config_file = get_json_param(Constant.P_ALL_CREDENTIAL_FILE)
with open(config_file, 'r') as f:
config = json.load(f)
return config[token]
def init_simple():
Constant.OS = os.name
Constant.HOST_NAME = socket.gethostname()
|
dan-cristian/haiot
|
common/__init__.py
|
Python
|
gpl-2.0
| 13,018
|
import os
# import tempfile
from ..compat import is_win32, is_py3
import xbmc, xbmcvfs
xdg_cache = tmp_dir = xbmc.translatePath('special://profile/addon_data/script.module.streamlink.base')
if not xbmcvfs.exists(tmp_dir):
xbmcvfs.mkdirs(tmp_dir)
if is_win32:
try:
from ctypes import windll, cast, c_ulong, c_void_p, byref
    except ImportError:
pass
PIPE_ACCESS_OUTBOUND = 0x00000002
PIPE_TYPE_BYTE = 0x00000000
PIPE_READMODE_BYTE = 0x00000000
PIPE_WAIT = 0x00000000
PIPE_UNLIMITED_INSTANCES = 255
INVALID_HANDLE_VALUE = -1
class NamedPipe(object):
def __init__(self, name):
self.fifo = None
self.pipe = None
if is_win32:
self.path = os.path.join("\\\\.\\pipe", name)
self.pipe = self._create_named_pipe(self.path)
else:
self.path = os.path.join(tmp_dir, name)
self._create_fifo(self.path)
def _create_fifo(self, name):
os.mkfifo(name, 0o660)
def _create_named_pipe(self, path):
bufsize = 8192
if is_py3:
create_named_pipe = windll.kernel32.CreateNamedPipeW
else:
create_named_pipe = windll.kernel32.CreateNamedPipeA
pipe = create_named_pipe(path, PIPE_ACCESS_OUTBOUND,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
PIPE_UNLIMITED_INSTANCES,
bufsize, bufsize,
0, None)
if pipe == INVALID_HANDLE_VALUE:
error_code = windll.kernel32.GetLastError()
raise IOError("Error code 0x{0:08X}".format(error_code))
return pipe
def open(self, mode):
if not self.pipe:
self.fifo = open(self.path, mode)
def write(self, data):
if self.pipe:
windll.kernel32.ConnectNamedPipe(self.pipe, None)
written = c_ulong(0)
windll.kernel32.WriteFile(self.pipe, cast(data, c_void_p),
len(data), byref(written),
None)
return written
else:
return self.fifo.write(data)
def close(self):
if self.pipe:
windll.kernel32.DisconnectNamedPipe(self.pipe)
else:
self.fifo.close()
os.unlink(self.path)
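# Usage sketch (illustrative, not part of the original module; the pipe name is
# hypothetical, and on POSIX a reader must open the FIFO for write() to proceed):
#
#   pipe = NamedPipe('streamlink-demo')
#   pipe.open('wb')         # opens the FIFO on POSIX; no-op on Windows
#   pipe.write(b'payload')  # ConnectNamedPipe + WriteFile on Windows
#   pipe.close()            # disconnects (Windows) or unlinks the FIFO (POSIX)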
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamlink.base/resources/lib/streamlink/utils/named_pipe.py
|
Python
|
gpl-2.0
| 2,398
|
"""Runs Capacity and Utilization with Replication Workload."""
from utils.appliance import IPAppliance
from utils.conf import cfme_performance
from utils.grafana import get_scenario_dashboard_urls
from utils.log import logger
from utils.providers import get_crud
from utils.smem_memory_monitor import add_workload_quantifiers, SmemMemoryMonitor
from utils.ssh import SSHClient, SSHTail
from utils.workloads import get_capacity_and_utilization_replication_scenarios
import time
import pytest
roles_cap_and_util_rep = ['automate', 'database_operations', 'database_synchronization',
'ems_inventory', 'ems_metrics_collector', 'ems_metrics_coordinator',
'ems_metrics_processor', 'ems_operations', 'event', 'notifier',
'reporting', 'scheduler', 'user_interface', 'web_services']
@pytest.mark.usefixtures('generate_version_files')
@pytest.mark.parametrize('scenario', get_capacity_and_utilization_replication_scenarios())
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
"""Runs through provider based scenarios enabling C&U and replication, run for a set period of
time. Memory Monitor creates graphs and summary at the end of each scenario."""
from_ts = int(time.time() * 1000)
ssh_client = appliance.ssh_client()
ssh_master_args = {
'hostname': scenario['replication_master']['ip_address'],
'username': scenario['replication_master']['ssh']['username'],
'password': scenario['replication_master']['ssh']['password']}
master_appliance = IPAppliance(address=scenario['replication_master']['ip_address'],
openshift_creds=ssh_master_args)
ssh_client_master = SSHClient(**ssh_master_args)
logger.debug('Scenario: {}'.format(scenario['name']))
    is_pglogical = scenario['replication'] == 'pglogical'
    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
sshtail_evm.set_initial_file_end()
logger.info('Clean appliance under test ({})'.format(ssh_client))
appliance.clean_appliance()
logger.info('Clean master appliance ({})'.format(ssh_client_master))
master_appliance.clean_appliance() # Clean Replication master appliance
if is_pglogical:
scenario_data = {'appliance_ip': appliance.hostname,
'appliance_name': cfme_performance['appliance']['appliance_name'],
'test_dir': 'workload-cap-and-util-rep',
'test_name': 'Capacity and Utilization Replication (pgLogical)',
'appliance_roles': ', '.join(roles_cap_and_util_rep),
'scenario': scenario}
else:
scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
'appliance_name': cfme_performance['appliance']['appliance_name'],
'test_dir': 'workload-cap-and-util-rep',
'test_name': 'Capacity and Utilization Replication (RubyRep)',
'appliance_roles': ', '.join(roles_cap_and_util_rep),
'scenario': scenario}
quantifiers = {}
monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)
def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
starttime = time.time()
to_ts = int(starttime * 1000)
g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
logger.debug('Started cleaning up monitoring thread.')
monitor_thread.grafana_urls = g_urls
monitor_thread.signal = False
monitor_thread.join()
add_workload_quantifiers(quantifiers, scenario_data)
timediff = time.time() - starttime
logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))
monitor_thread.start()
appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
for provider in scenario['providers']:
get_crud(provider).create_rest()
logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
time.sleep(scenario['refresh_sleep_time'])
appliance.set_cap_and_util_all_via_rails()
# Configure Replication
if is_pglogical:
# Setup appliance under test to :remote
appliance.set_pglogical_replication(replication_type=':remote')
# Setup master appliance to :global
master_appliance.set_pglogical_replication(replication_type=':global')
# Setup master to subscribe:
master_appliance.add_pglogical_replication_subscription(ssh_client_master,
appliance.address)
else:
# Setup local towards Master
appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
# Force uninstall rubyrep for this region from master (Unsure if still needed)
# ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
# Turn on DB Sync role
appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
# Variable amount of time for C&U collections/processing
total_time = scenario['total_time']
starttime = time.time()
elapsed_time = 0
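    # Poll in 5-minute slices so progress is logged regularly; on the last pass
    # sleep only the exact remainder so the scenario ends close to total_time.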
    while elapsed_time < total_time:
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if 0 < time_left < 300:
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)
# Turn off replication:
if is_pglogical:
appliance.set_pglogical_replication(replication_type=':none')
else:
appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
logger.info('Test Ending...')
|
dajohnso/cfme_tests
|
cfme/tests/perf/workloads/test_capacity_and_utilization_replication.py
|
Python
|
gpl-2.0
| 6,340
|
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
"""Definition of the Atoms class.
This module defines the central object in the ASE package: the Atoms
object.
"""
import warnings
from math import cos, sin
import numpy as np
from ase.atom import Atom
from ase.data import atomic_numbers, chemical_symbols, atomic_masses
import ase.units as units
class Atoms(object):
"""Atoms object.
The Atoms object can represent an isolated molecule, or a
periodically repeated structure. It has a unit cell and
there may be periodic boundary conditions along any of the three
unit cell axes.
Information about the atoms (atomic numbers and position) is
stored in ndarrays. Optionally, there can be information about
tags, momenta, masses, magnetic moments and charges.
In order to calculate energies, forces and stresses, a calculator
    object has to be attached to the atoms object.
Parameters:
symbols: str (formula) or list of str
Can be a string formula, a list of symbols or a list of
Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'],
[Atom('Ne', (x, y, z)), ...].
positions: list of xyz-positions
Atomic positions. Anything that can be converted to an
ndarray of shape (n, 3) will do: [(x1,y1,z1), (x2,y2,z2),
...].
scaled_positions: list of scaled-positions
Like positions, but given in units of the unit cell.
Can not be set at the same time as positions.
numbers: list of int
Atomic numbers (use only one of symbols/numbers).
tags: list of int
Special purpose tags.
momenta: list of xyz-momenta
Momenta for all atoms.
masses: list of float
Atomic masses in atomic units.
magmoms: list of float or list of xyz-values
Magnetic moments. Can be either a single value for each atom
for collinear calculations or three numbers for each atom for
non-collinear calculations.
charges: list of float
Atomic charges.
cell: 3x3 matrix
Unit cell vectors. Can also be given as just three
numbers for orthorhombic cells. Default value: [1, 1, 1].
celldisp: Vector
        Unit cell displacement vector. Used to visualize a cell displaced
        around the center of mass of a system of atoms. Default value: (0, 0, 0).
pbc: one or three bool
Periodic boundary conditions flags. Examples: True,
False, 0, 1, (1, 1, 0), (True, False, False). Default
value: False.
constraint: constraint object(s)
Used for applying one or more constraints during structure
optimization.
calculator: calculator object
Used to attach a calculator for calculating energies and atomic
forces.
info: dict of key-value pairs
Dictionary of key-value pairs with additional information
about the system. The following keys may be used by ase:
- spacegroup: Spacegroup instance
- unit_cell: 'conventional' | 'primitive' | int | 3 ints
- adsorbate_info:
    Items in the info attribute survive copy and slicing and can be
    stored to and retrieved from trajectory files, given that the
key is a string, the value is picklable and, if the value is a
user-defined object, its base class is importable. One should
not make any assumptions about the existence of keys.
Examples:
These three are equivalent:
>>> d = 1.104 # N2 bondlength
>>> a = Atoms('N2', [(0, 0, 0), (0, 0, d)])
>>> a = Atoms(numbers=[7, 7], positions=[(0, 0, 0), (0, 0, d)])
    >>> a = Atoms([Atom('N', (0, 0, 0)), Atom('N', (0, 0, d))])
FCC gold:
>>> a = 4.05 # Gold lattice constant
>>> b = a / 2
>>> fcc = Atoms('Au',
... cell=[(0, b, b), (b, 0, b), (b, b, 0)],
... pbc=True)
Hydrogen wire:
>>> d = 0.9 # H-H distance
>>> L = 7.0
>>> h = Atoms('H', positions=[(0, L / 2, L / 2)],
... cell=(d, L, L),
... pbc=(1, 0, 0))
"""
def __init__(self, symbols=None,
positions=None, numbers=None,
tags=None, momenta=None, masses=None,
magmoms=None, charges=None,
scaled_positions=None,
cell=None, pbc=None, celldisp=None,
constraint=None,
calculator=None,
info=None):
atoms = None
if hasattr(symbols, 'GetUnitCell'):
from ase.old import OldASEListOfAtomsWrapper
atoms = OldASEListOfAtomsWrapper(symbols)
symbols = None
elif hasattr(symbols, 'get_positions'):
atoms = symbols
symbols = None
elif (isinstance(symbols, (list, tuple)) and
len(symbols) > 0 and isinstance(symbols[0], Atom)):
# Get data from a list or tuple of Atom objects:
data = [[atom.get_raw(name) for atom in symbols]
for name in
['position', 'number', 'tag', 'momentum',
'mass', 'magmom', 'charge']]
atoms = self.__class__(None, *data)
symbols = None
if atoms is not None:
# Get data from another Atoms object:
if scaled_positions is not None:
raise NotImplementedError
if symbols is None and numbers is None:
numbers = atoms.get_atomic_numbers()
if positions is None:
positions = atoms.get_positions()
if tags is None and atoms.has('tags'):
tags = atoms.get_tags()
if momenta is None and atoms.has('momenta'):
momenta = atoms.get_momenta()
if magmoms is None and atoms.has('magmoms'):
magmoms = atoms.get_initial_magnetic_moments()
if masses is None and atoms.has('masses'):
masses = atoms.get_masses()
if charges is None and atoms.has('charges'):
charges = atoms.get_charges()
if cell is None:
cell = atoms.get_cell()
if celldisp is None:
celldisp = atoms.get_celldisp()
if pbc is None:
pbc = atoms.get_pbc()
if constraint is None:
constraint = [c.copy() for c in atoms.constraints]
if calculator is None:
calculator = atoms.get_calculator()
self.arrays = {}
if symbols is None:
if numbers is None:
if positions is not None:
natoms = len(positions)
elif scaled_positions is not None:
natoms = len(scaled_positions)
else:
natoms = 0
numbers = np.zeros(natoms, int)
self.new_array('numbers', numbers, int)
else:
if numbers is not None:
raise ValueError(
'Use only one of "symbols" and "numbers".')
else:
self.new_array('numbers', symbols2numbers(symbols), int)
if cell is None:
cell = np.eye(3)
self.set_cell(cell)
if celldisp is None:
celldisp = np.zeros(shape=(3,1))
self.set_celldisp(celldisp)
if positions is None:
if scaled_positions is None:
positions = np.zeros((len(self.arrays['numbers']), 3))
else:
positions = np.dot(scaled_positions, self._cell)
else:
if scaled_positions is not None:
raise RuntimeError('Both scaled and cartesian positions set!')
self.new_array('positions', positions, float, (3,))
self.set_constraint(constraint)
self.set_tags(default(tags, 0))
self.set_momenta(default(momenta, (0.0, 0.0, 0.0)))
self.set_masses(default(masses, None))
self.set_initial_magnetic_moments(default(magmoms, 0.0))
self.set_charges(default(charges, 0.0))
if pbc is None:
pbc = False
self.set_pbc(pbc)
if info is None:
self.info = {}
else:
self.info = dict(info)
self.adsorbate_info = {}
self.set_calculator(calculator)
def set_calculator(self, calc=None):
"""Attach calculator object."""
if hasattr(calc, '_SetListOfAtoms'):
from ase.old import OldASECalculatorWrapper
calc = OldASECalculatorWrapper(calc, self)
if hasattr(calc, 'set_atoms'):
calc.set_atoms(self)
self._calc = calc
def get_calculator(self):
"""Get currently attached calculator object."""
return self._calc
def _del_calculator(self):
self._calc = None
calc = property(get_calculator, set_calculator, _del_calculator,
doc='Calculator object.')
def set_constraint(self, constraint=None):
"""Apply one or more constrains.
The *constraint* argument must be one constraint object or a
list of constraint objects."""
if constraint is None:
self._constraints = []
else:
if isinstance(constraint, (list, tuple)):
self._constraints = constraint
else:
self._constraints = [constraint]
def _get_constraints(self):
return self._constraints
def _del_constraints(self):
self._constraints = []
constraints = property(_get_constraints, set_constraint, _del_constraints,
'Constraints of the atoms.')
def set_cell(self, cell, scale_atoms=False, fix=None):
"""Set unit cell vectors.
Parameters:
cell :
Unit cell. A 3x3 matrix (the three unit cell vectors) or
just three numbers for an orthorhombic cell.
scale_atoms : bool
Fix atomic positions or move atoms with the unit cell?
Default behavior is to *not* move the atoms (scale_atoms=False).
Examples:
Two equivalent ways to define an orthorhombic cell:
>>> a.set_cell([a, b, c])
>>> a.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)])
FCC unit cell:
>>> a.set_cell([(0, b, b), (b, 0, b), (b, b, 0)])
"""
if fix is not None:
raise TypeError('Please use scale_atoms=%s' % (not fix))
cell = np.array(cell, float)
if cell.shape == (3,):
cell = np.diag(cell)
elif cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence or '
'3x3 matrix!')
if scale_atoms:
M = np.linalg.solve(self._cell, cell)
self.arrays['positions'][:] = np.dot(self.arrays['positions'], M)
self._cell = cell
def set_celldisp(self, celldisp):
celldisp = np.array(celldisp, float)
self._celldisp = celldisp
def get_celldisp(self):
"""Get the unit cell displacement vectors ."""
return self._celldisp.copy()
def get_cell(self):
"""Get the three unit cell vectors as a 3x3 ndarray."""
return self._cell.copy()
def get_reciprocal_cell(self):
"""Get the three reciprocal lattice vectors as a 3x3 ndarray.
Note that the commonly used factor of 2 pi for Fourier
transforms is not included here."""
rec_unit_cell = np.linalg.inv(self.get_cell()).transpose()
return rec_unit_cell
def set_pbc(self, pbc):
"""Set periodic boundary condition flags."""
if isinstance(pbc, int):
pbc = (pbc,) * 3
self._pbc = np.array(pbc, bool)
def get_pbc(self):
"""Get periodic boundary condition flags."""
return self._pbc.copy()
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""Get an array.
Returns a copy unless the optional argument copy is false.
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""Update array.
If *shape* is not *None*, the shape of *a* will be checked.
If *a* is *None*, then the array is deleted."""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def has(self, name):
"""Check for existence of array.
name must be one of: 'tags', 'momenta', 'masses', 'magmoms',
'charges'."""
return name in self.arrays
def set_atomic_numbers(self, numbers):
"""Set atomic numbers."""
self.set_array('numbers', numbers, int, ())
def get_atomic_numbers(self):
"""Get integer array of atomic numbers."""
return self.arrays['numbers'].copy()
def set_chemical_symbols(self, symbols):
"""Set chemical symbols."""
self.set_array('numbers', symbols2numbers(symbols), int, ())
def get_chemical_symbols(self, reduce=False):
"""Get list of chemical symbol strings."""
if reduce:
warnings.warn('ase.atoms.get_chemical_symbols(reduce=True) is ' +
'deprecated. Please use ase.atoms.get_chemical' +
'_formula(mode="reduce") instead.',
DeprecationWarning, stacklevel=2)
return self.get_chemical_formula(mode='reduce')
return [chemical_symbols[Z] for Z in self.arrays['numbers']]
def get_chemical_formula(self, mode='hill'):
"""Get the chemial formula as a string based on the chemical symbols.
Parameters:
mode:
There are three different modes available:
        'all': The list of chemical symbols is contracted to a string,
e.g. ['C', 'H', 'H', 'H', 'O', 'H'] becomes 'CHHHOH'.
'reduce': The same as 'all' where repeated elements are contracted
to a single symbol and a number, e.g. 'CHHHOCHHH' is reduced to
'CH3OCH3'.
        'hill': The list of chemical symbols is contracted to a string
        following the Hill notation (alphabetical order with C and H
        first), e.g. 'CHHHOCHHH' is reduced to 'C2H6O' and 'SOOHOHO' to
        'H2O4S'. This is the default.
"""
if len(self) == 0:
return ''
if mode == 'reduce':
numbers = self.get_atomic_numbers()
n = len(numbers)
changes = np.concatenate(([0], np.arange(1, n)[numbers[1:] !=
numbers[:-1]]))
symbols = [chemical_symbols[e] for e in numbers[changes]]
counts = np.append(changes[1:], n) - changes
elif mode == 'hill':
numbers = self.get_atomic_numbers()
elements = np.unique(numbers)
symbols = np.array([chemical_symbols[e] for e in elements])
counts = np.array([(numbers == e).sum() for e in elements])
ind = symbols.argsort()
symbols = symbols[ind]
counts = counts[ind]
if 'H' in symbols:
i = np.arange(len(symbols))[symbols == 'H']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
if 'C' in symbols:
i = np.arange(len(symbols))[symbols == 'C']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
elif mode == 'all':
numbers = self.get_atomic_numbers()
symbols = [chemical_symbols[n] for n in numbers]
counts = [1] * len(numbers)
else:
raise ValueError("Use mode = 'all', 'reduce' or 'hill'.")
formula = ''
for s, c in zip(symbols, counts):
formula += s
if c > 1:
formula += str(c)
return formula
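    # Illustrative examples (not in the original source):
    #   Atoms('CHHHOCHHH').get_chemical_formula()               -> 'C2H6O'
    #   Atoms('CHHHOCHHH').get_chemical_formula(mode='reduce')  -> 'CH3OCH3'
    #   Atoms('CHHHOCHHH').get_chemical_formula(mode='all')     -> 'CHHHOCHHH'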
def set_tags(self, tags):
"""Set tags for all atoms."""
self.set_array('tags', tags, int, ())
def get_tags(self):
"""Get integer array of tags."""
if 'tags' in self.arrays:
return self.arrays['tags'].copy()
else:
return np.zeros(len(self), int)
def set_momenta(self, momenta):
"""Set momenta."""
if len(self.constraints) > 0 and momenta is not None:
momenta = np.array(momenta) # modify a copy
for constraint in self.constraints:
constraint.adjust_forces(self.arrays['positions'], momenta)
self.set_array('momenta', momenta, float, (3,))
def set_velocities(self, velocities):
"""Set the momenta by specifying the velocities."""
self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
def get_momenta(self):
"""Get array of momenta."""
if 'momenta' in self.arrays:
return self.arrays['momenta'].copy()
else:
return np.zeros((len(self), 3))
def set_masses(self, masses='defaults'):
"""Set atomic masses.
The array masses should contain a list of masses. In case
the masses argument is not given or for those elements of the
masses list that are None, standard values are set."""
if masses == 'defaults':
masses = atomic_masses[self.arrays['numbers']]
elif isinstance(masses, (list, tuple)):
newmasses = []
for m, Z in zip(masses, self.arrays['numbers']):
if m is None:
newmasses.append(atomic_masses[Z])
else:
newmasses.append(m)
masses = newmasses
self.set_array('masses', masses, float, ())
def get_masses(self):
"""Get array of masses."""
if 'masses' in self.arrays:
return self.arrays['masses'].copy()
else:
return atomic_masses[self.arrays['numbers']]
def set_initial_magnetic_moments(self, magmoms=None):
"""Set the initial magnetic moments.
Use either one or three numbers for every atom (collinear
or non-collinear spins)."""
if magmoms is None:
self.set_array('magmoms', None)
else:
magmoms = np.asarray(magmoms)
self.set_array('magmoms', magmoms, float, magmoms.shape[1:])
def get_initial_magnetic_moments(self):
"""Get array of initial magnetic moments."""
if 'magmoms' in self.arrays:
return self.arrays['magmoms'].copy()
else:
return np.zeros(len(self))
def get_magnetic_moments(self):
"""Get calculated local magnetic moments."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
if self._calc.get_spin_polarized():
return self._calc.get_magnetic_moments(self)
else:
return np.zeros(len(self))
def get_magnetic_moment(self):
"""Get calculated total magnetic moment."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
if self._calc.get_spin_polarized():
return self._calc.get_magnetic_moment(self)
else:
return 0.0
def set_charges(self, charges):
"""Set charges."""
self.set_array('charges', charges, float, ())
def get_charges(self):
"""Get array of charges."""
if 'charges' in self.arrays:
return self.arrays['charges'].copy()
else:
return np.zeros(len(self))
def set_positions(self, newpositions):
"""Set positions, honoring any constraints."""
positions = self.arrays['positions']
if self.constraints:
newpositions = np.array(newpositions, float)
for constraint in self.constraints:
constraint.adjust_positions(positions, newpositions)
self.set_array('positions', newpositions, shape=(3,))
def get_positions(self, wrap=False):
"""Get array of positions. If wrap==True, wraps atoms back
into unit cell.
"""
if wrap:
scaled = self.get_scaled_positions()
return np.dot(scaled, self._cell)
else:
return self.arrays['positions'].copy()
def get_calculation_done(self):
"""Let the calculator calculate its thing,
using the current input.
"""
if self.calc is None:
raise RuntimeError('Atoms object has no calculator.')
self.calc.initialize(self)
self.calc.calculate(self)
def get_potential_energy(self):
"""Calculate potential energy."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_potential_energy(self)
def get_potential_energies(self):
"""Calculate the potential energies of all the atoms.
Only available with calculators supporting per-atom energies
(e.g. classical potentials).
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_potential_energies(self)
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get('momenta')
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
def get_velocities(self):
"""Get array of velocities."""
momenta = self.arrays.get('momenta')
if momenta is None:
return None
m = self.arrays.get('masses')
if m is None:
m = atomic_masses[self.arrays['numbers']]
return momenta / m.reshape(-1, 1)
def get_total_energy(self):
"""Get the total energy - potential plus kinetic energy."""
return self.get_potential_energy() + self.get_kinetic_energy()
def get_forces(self, apply_constraint=True):
"""Calculate atomic forces.
Ask the attached calculator to calculate the forces and apply
constraints. Use *apply_constraint=False* to get the raw
forces."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
forces = self._calc.get_forces(self)
if apply_constraint:
for constraint in self.constraints:
constraint.adjust_forces(self.arrays['positions'], forces)
return forces
def get_stress(self, voigt=True):
"""Calculate stress tensor.
Returns an array of the six independent components of the
symmetric stress tensor, in the traditional Voigt order
(xx, yy, zz, yz, xz, xy) or as a 3x3 matrix. Default is Voigt
order.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
stress = self._calc.get_stress(self)
shape = stress.shape
if shape == (3, 3):
warnings.warn('Converting 3x3 stress tensor from %s ' %
self._calc.__class__.__name__ +
'calculator to the required Voigt form.')
stress = np.array([stress[0, 0], stress[1, 1], stress[2, 2],
stress[1, 2], stress[0, 2], stress[0, 1]])
else:
assert shape == (6,)
if voigt:
return stress
else:
xx, yy, zz, yz, xz, xy = stress
return np.array([(xx, xy, xz),
(xy, yy, yz),
(xz, yz, zz)])
def get_stresses(self):
"""Calculate the stress-tensor of all the atoms.
Only available with calculators supporting per-atom energies and
stresses (e.g. classical potentials). Even for such calculators
there is a certain arbitrariness in defining per-atom stresses.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_stresses(self)
def get_dipole_moment(self):
"""Calculate the electric dipole moment for the atoms object.
Only available for calculators which has a get_dipole_moment()
method."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_dipole_moment(self)
def copy(self):
"""Return a copy."""
import copy
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a.copy()
atoms.constraints = copy.deepcopy(self.constraints)
atoms.adsorbate_info = copy.deepcopy(self.adsorbate_info)
return atoms
def __len__(self):
return len(self.arrays['positions'])
def get_number_of_atoms(self):
"""Returns the number of atoms.
Equivalent to len(atoms) in the standard ASE Atoms class.
"""
return len(self)
def __repr__(self):
num = self.get_atomic_numbers()
N = len(num)
if N == 0:
symbols = ''
elif N <= 60:
symbols = self.get_chemical_formula('reduce')
else:
symbols = self.get_chemical_formula('hill')
s = "%s(symbols='%s', " % (self.__class__.__name__, symbols)
for name in self.arrays:
if name == 'numbers':
continue
s += '%s=..., ' % name
if (self._cell - np.diag(self._cell.diagonal())).any():
s += 'cell=%s, ' % self._cell.tolist()
else:
s += 'cell=%s, ' % self._cell.diagonal().tolist()
s += 'pbc=%s, ' % self._pbc.tolist()
if len(self.constraints) == 1:
s += 'constraint=%s, ' % repr(self.constraints[0])
if len(self.constraints) > 1:
s += 'constraint=%s, ' % repr(self.constraints)
if self._calc is not None:
s += 'calculator=%s(...), ' % self._calc.__class__.__name__
return s[:-2] + ')'
def __add__(self, other):
atoms = self.copy()
atoms += other
return atoms
def extend(self, other):
"""Extend atoms object by appending atoms from *other*."""
if isinstance(other, Atom):
other = self.__class__([other])
n1 = len(self)
n2 = len(other)
for name, a1 in self.arrays.items():
a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype)
a[:n1] = a1
if name == 'masses':
a2 = other.get_masses()
else:
a2 = other.arrays.get(name)
if a2 is not None:
a[n1:] = a2
self.arrays[name] = a
for name, a2 in other.arrays.items():
if name in self.arrays:
continue
a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype)
a[n1:] = a2
if name == 'masses':
a[:n1] = self.get_masses()[:n1]
else:
a[:n1] = 0
self.set_array(name, a)
return self
__iadd__ = extend
def append(self, atom):
"""Append atom to end."""
self.extend(self.__class__([atom]))
def __getitem__(self, i):
"""Return a subset of the atoms.
i -- scalar integer, list of integers, or slice object
describing which atoms to return.
If i is a scalar, return an Atom object. If i is a list or a
slice, return an Atoms object with the same cell, pbc, and
other associated info as the original Atoms object. The
indices of the constraints will be shuffled so that they match
the indexing in the subset returned.
"""
if isinstance(i, int):
natoms = len(self)
if i < -natoms or i >= natoms:
raise IndexError('Index out of range.')
return Atom(atoms=self, index=i)
import copy
from ase.constraints import FixConstraint
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
# TODO: Do we need to shuffle indices in adsorbate_info too?
atoms.adsorbate_info = self.adsorbate_info
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a[i].copy()
# Constraints need to be deepcopied, since we need to shuffle
# the indices
atoms.constraints = copy.deepcopy(self.constraints)
condel = []
for con in atoms.constraints:
if isinstance(con, FixConstraint):
try:
con.index_shuffle(i)
except IndexError:
condel.append(con)
for con in condel:
atoms.constraints.remove(con)
return atoms
def __delitem__(self, i):
from ase.constraints import FixAtoms
check_constraint = np.array([isinstance(c, FixAtoms)
for c in self._constraints])
if len(self._constraints) > 0 and not check_constraint.all():
raise RuntimeError('Remove constraint using set_constraint() ' +
'before deleting atoms.')
mask = np.ones(len(self), bool)
mask[i] = False
for name, a in self.arrays.items():
self.arrays[name] = a[mask]
if len(self._constraints) > 0:
for n in range(len(self._constraints)):
self._constraints[n].delete_atom(range(len(mask))[i])
def pop(self, i=-1):
"""Remove and return atom at index *i* (default last)."""
atom = self[i]
atom.cut_reference_to_atoms()
del self[i]
return atom
def __imul__(self, m):
"""In-place repeat of atoms."""
if isinstance(m, int):
m = (m, m, m)
M = np.product(m)
n = len(self)
for name, a in self.arrays.items():
self.arrays[name] = np.tile(a, (M,) + (1,) * (len(a.shape) - 1))
positions = self.arrays['positions']
i0 = 0
for m0 in range(m[0]):
for m1 in range(m[1]):
for m2 in range(m[2]):
i1 = i0 + n
positions[i0:i1] += np.dot((m0, m1, m2), self._cell)
i0 = i1
if self.constraints is not None:
self.constraints = [c.repeat(m, n) for c in self.constraints]
self._cell = np.array([m[c] * self._cell[c] for c in range(3)])
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
__mul__ = repeat
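    # Illustrative example (not in the original source): repeating a one-atom
    # cell replicates both the atoms and the unit cell.
    #   wire = Atoms('H', cell=(1, 1, 1), pbc=True)
    #   len(wire.repeat((2, 1, 1)))  # -> 2
    #   len(wire * 3)                # -> 27, same as repeat((3, 3, 3))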
def translate(self, displacement):
"""Translate atomic positions.
        The displacement argument can be a float, an xyz vector, or an
        nx3 array (where n is the number of atoms).
self.arrays['positions'] += np.array(displacement)
def shift_in_unit_cell(self, atom_idx, position=(0.5, 0.5, 0.5),
cartesian=False, fold=True):
"""Shift atoms in unit cell so that atom number 'atom_idx'
is positioned at 'position'.
        By default, atom 'atom_idx' is centered and all the atoms are folded.
Parameters:
cartesian (default: False): If True, 'position' is expressed in
        Cartesian coordinates; direct coordinates are used otherwise.
fold (default: True): If True and periodic boundary conditions are
enforced, atoms outside the unit cell are replaced with their
equivalents within the unit cell.
"""
if not cartesian:
position = self._cell.T.dot(position)
self.translate(position - self.arrays["positions"][atom_idx])
if fold:
self.fold_positions()
def center(self, vacuum=None, axis=None):
"""Center atoms in unit cell.
Centers the atoms in the unit cell, so there is the same
amount of vacuum on all sides.
Parameters:
vacuum (default: None): If specified adjust the amount of
vacuum when centering. If vacuum=10.0 there will thus be 10
Angstrom of vacuum on each side.
axis (default: None): If specified, only act on the specified
axis. Default: Act on all axes.
"""
# Find the orientations of the faces of the unit cell
c = self.get_cell()
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.sqrt(np.dot(dirs[i], dirs[i])) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if axis is None:
axes = (0, 1, 2)
else:
axes = (axis,)
p = self.arrays['positions']
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.sqrt(np.dot(c[i], c[i]))
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self._cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.arrays['positions'] += translation
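    # Illustrative example (not in the original source): center a molecule with
    # 5 Angstrom of vacuum on every side, growing the default cell as needed.
    #   mol = Atoms('N2', positions=[(0, 0, 0), (0, 0, 1.1)])
    #   mol.center(vacuum=5.0)  # cell becomes roughly 10 x 10 x 11.1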
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.get_masses()
com = np.dot(m, self.arrays['positions']) / m.sum()
if scaled:
return np.linalg.solve(self._cell.T, com)
else:
return com
def get_moments_of_inertia(self, vectors=False):
"""Get the moments of inertia along the principal axes.
The three principal moments of inertia are computed from the
eigenvalues of the symmetric inertial tensor. Periodic boundary
conditions are ignored. Units of the moments of inertia are
amu*angstrom**2.
"""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
masses = self.get_masses()
#initialize elements of the inertial tensor
I11 = I22 = I33 = I12 = I13 = I23 = 0.0
for i in range(len(self)):
x, y, z = positions[i]
m = masses[i]
I11 += m * (y**2 + z**2)
I22 += m * (x**2 + z**2)
I33 += m * (x**2 + y**2)
I12 += -m * x * y
I13 += -m * x * z
I23 += -m * y * z
I = np.array([[I11, I12, I13],
[I12, I22, I23],
[I13, I23, I33]])
evals, evecs = np.linalg.eigh(I)
if vectors:
return evals, evecs.transpose()
else:
return evals
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
def rotate(self, v, a=None, center=(0, 0, 0), rotate_cell=False):
"""Rotate atoms based on a vector and an angle, or two vectors.
Parameters:
v:
Vector to rotate the atoms around. Vectors can be given as
strings: 'x', '-x', 'y', ... .
a = None:
            Angle that the atoms are rotated around the vector 'v'. If an
            angle is not specified, the length of 'v' is used as the angle
            (default). The angle can also be a vector, in which case 'v' is
            rotated into 'a'.
center = (0, 0, 0):
The center is kept fixed under the rotation. Use 'COM' to fix
the center of mass, 'COP' to fix the center of positions or
'COU' to fix the center of cell.
rotate_cell = False:
If true the cell is also rotated.
Examples:
Rotate 90 degrees around the z-axis, so that the x-axis is
rotated into the y-axis:
>>> a = pi / 2
>>> atoms.rotate('z', a)
>>> atoms.rotate((0, 0, 1), a)
>>> atoms.rotate('-z', -a)
>>> atoms.rotate((0, 0, a))
>>> atoms.rotate('x', 'y')
"""
norm = np.linalg.norm
v = string2vector(v)
if a is None:
a = norm(v)
if isinstance(a, (float, int)):
v /= norm(v)
c = cos(a)
s = sin(a)
else:
v2 = string2vector(a)
v /= norm(v)
v2 /= norm(v2)
c = np.dot(v, v2)
v = np.cross(v, v2)
s = norm(v)
            # In case *v* and *a* are parallel, np.cross(v, v2) vanishes
# and can't be used as a rotation axis. However, in this
# case any rotation axis perpendicular to v2 will do.
eps = 1e-7
if s < eps:
v = np.cross((0, 0, 1), v2)
if norm(v) < eps:
v = np.cross((1, 0, 0), v2)
assert norm(v) >= eps
elif s > 0:
v /= s
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
p = self.arrays['positions'] - center
self.arrays['positions'][:] = (c * p -
np.cross(p, s * v) +
np.outer(np.dot(p, v), (1.0 - c) * v) +
center)
if rotate_cell:
rotcell = self.get_cell()
rotcell[:] = (c * rotcell -
np.cross(rotcell, s * v) +
np.outer(np.dot(rotcell, v), (1.0 - c) * v))
self.set_cell(rotcell)
def rotate_euler(self, center=(0, 0, 0), phi=0.0, theta=0.0, psi=0.0):
"""Rotate atoms via Euler angles.
        See e.g. http://mathworld.wolfram.com/EulerAngles.html for an explanation.
Parameters:
center :
The point to rotate about. A sequence of length 3 with the
coordinates, or 'COM' to select the center of mass, 'COP' to
select center of positions or 'COU' to select center of cell.
phi :
The 1st rotation angle around the z axis.
theta :
Rotation around the x axis.
psi :
2nd rotation around the z axis.
"""
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
        # First move the molecule to the origin. In contrast to MATLAB,
# numpy broadcasts the smaller array to the larger row-wise,
# so there is no need to play with the Kronecker product.
rcoords = self.positions - center
# First Euler rotation about z in matrix form
D = np.array(((cos(phi), sin(phi), 0.),
(-sin(phi), cos(phi), 0.),
(0., 0., 1.)))
# Second Euler rotation about x:
C = np.array(((1., 0., 0.),
(0., cos(theta), sin(theta)),
(0., -sin(theta), cos(theta))))
# Third Euler rotation, 2nd rotation about z:
B = np.array(((cos(psi), sin(psi), 0.),
(-sin(psi), cos(psi), 0.),
(0., 0., 1.)))
# Total Euler rotation
A = np.dot(B, np.dot(C, D))
# Do the rotation
rcoords = np.dot(A, np.transpose(rcoords))
# Move back to the rotation point
self.positions = np.transpose(rcoords) + center
def get_dihedral(self, list):
"""Calculate dihedral angle.
Calculate dihedral angle between the vectors list[0]->list[1]
and list[2]->list[3], where list contains the atomic indexes
in question.
"""
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = self.positions[list[1]] - self.positions[list[0]]
b = self.positions[list[2]] - self.positions[list[1]]
c = self.positions[list[3]] - self.positions[list[2]]
bxa = np.cross(b, a)
bxa /= np.linalg.norm(bxa)
cxb = np.cross(c, b)
cxb /= np.linalg.norm(cxb)
angle = np.vdot(bxa, cxb)
# check for numerical trouble due to finite precision:
if angle < -1:
angle = -1
if angle > 1:
angle = 1
angle = np.arccos(angle)
if np.vdot(bxa, c) > 0:
angle = 2 * np.pi - angle
return angle
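    # Illustrative example (not in the original source): four atoms in a plane
    # in a cis arrangement give a dihedral angle of 0.
    #   chain = Atoms('C4', positions=[(0, 1, 0), (0, 0, 0), (1, 0, 0), (1, 1, 0)])
    #   chain.get_dihedral([0, 1, 2, 3])  # -> 0.0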
def _masked_rotate(self, center, axis, diff, mask):
# do rotation of subgroup by copying it to temporary atoms object
# and then rotating that
#
# recursive object definition might not be the most elegant thing,
# more generally useful might be a rotation function with a mask?
group = self.__class__()
for i in range(len(self)):
if mask[i]:
group += self[i]
group.translate(-center)
group.rotate(axis, diff)
group.translate(center)
# set positions in original atoms object
j = 0
for i in range(len(self)):
if mask[i]:
self.positions[i] = group[j].position
j += 1
def set_dihedral(self, list, angle, mask=None):
"""
set the dihedral angle between vectors list[0]->list[1] and
list[2]->list[3] by changing the atom indexed by list[3]
if mask is not None, all the atoms described in mask
(read: the entire subgroup) are moved
example: the following defines a very crude
ethane-like molecule and twists one half of it by 30 degrees.
>>> atoms = Atoms('HHCCHH', [[-1, 1, 0], [-1, -1, 0], [0, 0, 0],
[1, 0, 0], [2, 1, 0], [2, -1, 0]])
>>> atoms.set_dihedral([1,2,3,4],7*pi/6,mask=[0,0,0,1,1,1])
"""
# if not provided, set mask to the last atom in the
# dihedral description
if mask is None:
mask = np.zeros(len(self))
mask[list[3]] = 1
        # compute the necessary change in dihedral angle, from the current value
current = self.get_dihedral(list)
diff = angle - current
axis = self.positions[list[2]] - self.positions[list[1]]
center = self.positions[list[2]]
self._masked_rotate(center, axis, diff, mask)
def rotate_dihedral(self, list, angle, mask=None):
"""Rotate dihedral angle.
Complementing the two routines above: rotate a group by a
predefined dihedral angle, starting from its current
configuration
"""
start = self.get_dihedral(list)
self.set_dihedral(list, angle + start, mask)
def get_angle(self, list):
"""Get angle formed by three atoms.
calculate angle between the vectors list[1]->list[0] and
list[1]->list[2], where list contains the atomic indexes in
question."""
# normalized vector 1->0, 1->2:
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)
return angle
def set_angle(self, list, angle, mask=None):
"""Set angle formed by three atoms.
Sets the angle between vectors list[1]->list[0] and
list[1]->list[2].
Same usage as in set_dihedral."""
# If not provided, set mask to the last atom in the angle description
if mask is None:
mask = np.zeros(len(self))
mask[list[2]] = 1
        # Compute the necessary change in angle, from the current value
current = self.get_angle(list)
diff = angle - current
# Do rotation of subgroup by copying it to temporary atoms object and
# then rotating that
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
axis = np.cross(v10, v12)
center = self.positions[list[1]]
self._masked_rotate(center, axis, diff, mask)
def rattle(self, stdev=0.001, seed=42):
"""Randomly displace atoms.
This method adds random displacements to the atomic positions,
taking a possible constraint into account. The random numbers are
drawn from a normal distribution of standard deviation stdev.
For a parallel calculation, it is important to use the same
seed on all processors! """
rs = np.random.RandomState(seed)
positions = self.arrays['positions']
self.set_positions(positions +
rs.normal(scale=stdev, size=positions.shape))
def fold_positions(self):
"""Fold atoms back into the unit cell"""
# Use the transpose of position array (limitation of np.linalg.solve)
pos_T = self.arrays['positions'].T
# Take the transpose of the cell (to get the right projections)
cell_T = self._cell.T
# Project atoms positions in a cubic cell and fold
cub_pos = np.linalg.solve(cell_T, pos_T)
for i, pbc in enumerate(self._pbc):
if pbc:
cub_pos %= 1.
# Doing it twice is not really needed since with PBC, 0==1
# But it seems to upset people, so...
cub_pos %= 1.
# Project back to the original unit cell
self.arrays['positions'][:] = (cell_T.dot(cub_pos)).T
def get_distance(self, a0, a1, mic=False):
"""Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
"""
R = self.arrays['positions']
D = R[a1] - R[a0]
if mic:
Dr = np.linalg.solve(self._cell.T, D)
D = np.dot(Dr - np.round(Dr) * self._pbc, self._cell)
return np.linalg.norm(D)
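    # Illustrative example (not in the original source): with a 10 Angstrom
    # periodic cell, the minimum image convention picks the shorter separation.
    #   pair = Atoms('H2', positions=[(1, 0, 0), (9, 0, 0)], cell=(10, 10, 10), pbc=True)
    #   pair.get_distance(0, 1)            # -> 8.0
    #   pair.get_distance(0, 1, mic=True)  # -> 2.0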
def set_distance(self, a0, a1, distance, fix=0.5):
"""Set the distance between two atoms.
Set the distance between atoms *a0* and *a1* to *distance*.
By default, the center of the two atoms will be fixed. Use
*fix=0* to fix the first atom, *fix=1* to fix the second
atom and *fix=0.5* (default) to fix the center of the bond."""
R = self.arrays['positions']
D = R[a1] - R[a0]
x = 1.0 - distance / np.linalg.norm(D)
R[a0] += (x * fix) * D
R[a1] -= (x * (1.0 - fix)) * D
def get_scaled_positions(self):
"""Get positions relative to unit cell.
Atoms outside the unit cell will be wrapped into the cell in
those directions with periodic boundary conditions so that the
scaled coordinates are between zero and one."""
scaled = np.linalg.solve(self._cell.T, self.arrays['positions'].T).T
for i in range(3):
if self._pbc[i]:
# Yes, we need to do it twice.
# See the scaled_positions.py test
scaled[:, i] %= 1.0
scaled[:, i] %= 1.0
return scaled
def set_scaled_positions(self, scaled):
"""Set positions relative to unit cell."""
self.arrays['positions'][:] = np.dot(scaled, self._cell)
def get_temperature(self):
"""Get the temperature. in Kelvin"""
ekin = self.get_kinetic_energy() / len(self)
return ekin / (1.5 * units.kB)
def get_isotropic_pressure(self, stress):
"""Get the current calculated pressure, assume isotropic medium.
in Bar
"""
if type(stress) == type(1.0) or type(stress) == type(1):
return -stress * 1e-5 / units.Pascal
elif stress.shape == (3, 3):
return (-(stress[0, 0] + stress[1, 1] + stress[2, 2]) / 3.0) * \
1e-5 / units.Pascal
elif stress.shape == (6,):
return (-(stress[0] + stress[1] + stress[2]) / 3.0) * \
1e-5 / units.Pascal
else:
raise ValueError('The external stress has the wrong shape.')
def __eq__(self, other):
"""Check for identity of two atoms objects.
Identity means: same positions, atomic numbers, unit cell and
periodic boundary conditions."""
try:
a = self.arrays
b = other.arrays
return (len(self) == len(other) and
(a['positions'] == b['positions']).all() and
(a['numbers'] == b['numbers']).all() and
(self._cell == other.cell).all() and
(self._pbc == other.pbc).all())
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return eq
else:
return not eq
__hash__ = None
def get_volume(self):
"""Get volume of unit cell."""
return abs(np.linalg.det(self._cell))
def _get_positions(self):
"""Return reference to positions-array for in-place manipulations."""
return self.arrays['positions']
def _set_positions(self, pos):
"""Set positions directly, bypassing constraints."""
self.arrays['positions'][:] = pos
positions = property(_get_positions, _set_positions,
doc='Attribute for direct ' +
'manipulation of the positions.')
def _get_atomic_numbers(self):
"""Return reference to atomic numbers for in-place
manipulations."""
return self.arrays['numbers']
numbers = property(_get_atomic_numbers, set_atomic_numbers,
doc='Attribute for direct ' +
'manipulation of the atomic numbers.')
def _get_cell(self):
"""Return reference to unit cell for in-place manipulations."""
return self._cell
cell = property(_get_cell, set_cell, doc='Attribute for direct ' +
'manipulation of the unit cell.')
def _get_pbc(self):
"""Return reference to pbc-flags for in-place manipulations."""
return self._pbc
pbc = property(_get_pbc, set_pbc,
doc='Attribute for direct manipulation ' +
'of the periodic boundary condition flags.')
def get_name(self):
import warnings
warnings.warn('ase.atoms.get_name is deprecated. Please use ase.' +
'atoms.get_chemical_formula(mode="hill") instead.',
DeprecationWarning, stacklevel=2)
return self.get_chemical_formula(mode='hill')
def write(self, filename, format=None, **kwargs):
"""Write yourself to a file."""
from ase.io import write
write(filename, self, format, **kwargs)
def edit(self):
"""Modify atoms interactively through ag viewer.
Conflicts leading to undesirable behaviour might arise
when matplotlib has been pre-imported with certain
incompatible backends and while trying to use the
plot feature inside the interactive ag. To circumvent,
please set matplotlib.use('gtk') before calling this
method.
"""
from ase.gui.images import Images
from ase.gui.gui import GUI
images = Images([self])
gui = GUI(images)
gui.run()
# use atoms returned from gui:
# (1) delete all currently available atoms
self.set_constraint()
for z in range(len(self)):
self.pop()
edited_atoms = gui.images.get_atoms(0)
# (2) extract atoms from edit session
self.extend(edited_atoms)
self.set_constraint(edited_atoms._get_constraints())
self.set_cell(edited_atoms.get_cell())
self.set_initial_magnetic_moments(edited_atoms.get_magnetic_moments())
self.set_tags(edited_atoms.get_tags())
return
def string2symbols(s):
"""Convert string to list of chemical symbols."""
n = len(s)
if n == 0:
return []
c = s[0]
if c.isdigit():
i = 1
while i < n and s[i].isdigit():
i += 1
return int(s[:i]) * string2symbols(s[i:])
if c == '(':
p = 0
for i, c in enumerate(s):
if c == '(':
p += 1
elif c == ')':
p -= 1
if p == 0:
break
j = i + 1
while j < n and s[j].isdigit():
j += 1
if j > i + 1:
m = int(s[i + 1:j])
else:
m = 1
return m * string2symbols(s[1:i]) + string2symbols(s[j:])
if c.isupper():
i = 1
if 1 < n and s[1].islower():
i += 1
j = i
while j < n and s[j].isdigit():
j += 1
if j > i:
m = int(s[i:j])
else:
m = 1
return m * [s[:i]] + string2symbols(s[j:])
else:
raise ValueError
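# Illustrative examples (not in the original source):
#   string2symbols('H2O')     -> ['H', 'H', 'O']
#   string2symbols('(CH3)2')  -> ['C', 'H', 'H', 'H', 'C', 'H', 'H', 'H']
#   string2symbols('CoPt')    -> ['Co', 'Pt']   (two-letter symbols detected)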
def symbols2numbers(symbols):
if isinstance(symbols, str):
symbols = string2symbols(symbols)
numbers = []
for s in symbols:
if isinstance(s, str):
numbers.append(atomic_numbers[s])
else:
numbers.append(s)
return numbers
def string2vector(v):
if isinstance(v, str):
if v[0] == '-':
return -string2vector(v[1:])
w = np.zeros(3)
w['xyz'.index(v)] = 1.0
return w
return np.array(v, float)
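# Illustrative examples (not in the original source):
#   string2vector('x')   -> array([1., 0., 0.])
#   string2vector('-y')  -> the negation of array([0., 1., 0.])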
def default(data, dflt):
"""Helper function for setting default values."""
if data is None:
return None
elif isinstance(data, (list, tuple)):
newdata = []
allnone = True
for x in data:
if x is None:
newdata.append(dflt)
else:
newdata.append(x)
allnone = False
if allnone:
return None
return newdata
else:
return data
|
conwayje/ase-python
|
ase/atoms.py
|
Python
|
gpl-2.0
| 57,335
|
#!/usr/bin/env python
#
# lsdserver -- Linked Sensor Data Server
# Copyright (C) 2014 Geoff Williams <geoff@geoffwilliams.me.uk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import os
from flask import Flask, render_template
from lsdserver.platform import platform
from lsdserver.parameter import parameter
from lsdserver.version import version
from lsdserver.sensor import sensor
from lsdserver.phenomena import phenomena
from lsdserver.flag import flag
from lsdserver.ui import ui
from lsdserver import status
from flask.ext.sqlalchemy import SQLAlchemy
from lsdserver.backend import mysql
# https://pythonhosted.org/Flask-Uploads/
def load_config(app, name, app_dir):
"""
Read a config file from a well-known location
"""
app.debug = True
config_file = name + ".cfg"
app.logger.info("app_name:%s; app_dir:%s;" % (name, app_dir))
# /etc/NAME.cfg
etc_config_file = "/etc/" + config_file
rel_config_file = app_dir + "/" + config_file
if os.path.isfile(etc_config_file):
f = etc_config_file
elif os.path.isfile(rel_config_file):
f = rel_config_file
else:
f = False
app.logger.warn("No %s found!" % config_file)
if f:
app.logger.info("config file: %s" % f)
app.config.from_pyfile(f)
if app.config["LOGDIR"]:
logfile = app.config["LOGDIR"] + "/lsdserver.log"
app.logger.info("logging to %s" % logfile)
file_handler = logging.FileHandler(logfile)
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
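# Illustrative lsdserver.cfg (the lookup above would find it at /etc/lsdserver.cfg
# or <app_dir>/lsdserver.cfg; keys and values below are hypothetical and
# deployment-specific):
#
#   LOGDIR = "/var/log/lsdserver"
#   SQLALCHEMY_DATABASE_URI = "mysql://lsd:secret@localhost/lsdserver"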
def create_app(app_dir):
app = Flask(__name__)
load_config(app, __name__, app_dir)
app.register_blueprint(version, url_prefix='/version')
app.register_blueprint(platform, url_prefix='/platform')
app.register_blueprint(parameter, url_prefix='/parameter')
app.register_blueprint(sensor, url_prefix='/sensor')
app.register_blueprint(phenomena, url_prefix='/phenomena')
app.register_blueprint(flag, url_prefix='/flag')
app.register_blueprint(ui, url_prefix="")
# database
app.system = mysql.Mysql()
app.db = SQLAlchemy(app)
app.system.session = app.db.session
# general stuff - error pages etc
app.errorhandler(404)(not_found_error)
    app.errorhandler(409)(conflict_error)
app.errorhandler(500)(internal_error)
return app
def not_found_error(error):
return render_template('404.html'), status.NOT_FOUND
def conflict_error(error):
return render_template('409.html'), status.CONFLICT
def internal_error(error):
#app.db.session.rollback()
return render_template('500.html'), status.SERVER_ERROR
|
GeoffWilliams/lsdserver
|
lsdserver/__init__.py
|
Python
|
gpl-2.0
| 3,360
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of Logilab-common.
#
# Logilab-common is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# Logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""This module provides bases for predicates dispatching (the pattern in use
here is similar to what's refered as multi-dispatch or predicate-dispatch in the
literature, though a bit different since the idea is to select across different
implementation 'e.g. classes), not to dispatch a message to a function or
method. It contains the following classes:
* :class:`RegistryStore`, the top level object which loads implementation
objects and stores them into registries. You'll usually use it to access
registries and their contained objects;
* :class:`Registry`, the base class which contains objects semantically grouped
(for instance, sharing a same API, hence the 'implementation' name). You'll
use it to select the proper implementation according to a context. Notice you
may use registries on their own without using the store.
.. Note::
implementation objects are usually designed to be accessed through the
   registry and not by direct instantiation, apart from using them as base classes.
The selection procedure is delegated to a selector, which is responsible for
scoring the object according to some context. At the end of the selection, if an
implementation has been found, an instance of this class is returned. A selector
is built from one or more predicates combined together using AND, OR, NOT
operators (actually `&`, `|` and `~`). You'll thus find some base classes to
build predicates:
* :class:`Predicate`, the abstract base predicate class
* :class:`AndPredicate`, :class:`OrPredicate`, :class:`NotPredicate`, which you
  shouldn't have to use directly. You'll use the `&`, `|` and `~` operators between
predicates directly
* :func:`objectify_predicate`
You'll eventually find one concrete predicate: :class:`yes`
.. autoclass:: RegistryStore
.. autoclass:: Registry
Predicates
----------
.. autoclass:: Predicate
.. autofunction:: objectify_predicate
.. autoclass:: yes
Debugging
---------
.. autoclass:: traced_selection
Exceptions
----------
.. autoclass:: RegistryException
.. autoclass:: RegistryNotFound
.. autoclass:: ObjectNotFound
.. autoclass:: NoSelectableObject
"""
__docformat__ = "restructuredtext en"
import sys
import types
import weakref
import traceback as tb
from os import listdir, stat
from os.path import join, isdir, exists
from logging import getLogger
from warnings import warn
from logilab.common.modutils import modpath_from_file
from logilab.common.logging_ext import set_log_methods
from logilab.common.decorators import classproperty
class RegistryException(Exception):
"""Base class for registry exception."""
class RegistryNotFound(RegistryException):
"""Raised when an unknown registry is requested.
This is usually a programming/typo error.
"""
class ObjectNotFound(RegistryException):
"""Raised when an unregistered object is requested.
This may be a programming/typo or a misconfiguration error.
"""
class NoSelectableObject(RegistryException):
"""Raised when no object is selectable for a given context."""
def __init__(self, args, kwargs, objects):
self.args = args
self.kwargs = kwargs
self.objects = objects
def __str__(self):
return ('args: %s, kwargs: %s\ncandidates: %s'
% (self.args, self.kwargs.keys(), self.objects))
def _modname_from_path(path, extrapath=None):
modpath = modpath_from_file(path, extrapath)
# omit '__init__' from package's name to avoid loading that module
# once for each name when it is imported by some other object
# module. This supposes imports in modules are done as::
#
# from package import something
#
# not::
#
# from package.__init__ import something
#
# which seems quite correct.
if modpath[-1] == '__init__':
modpath.pop()
return '.'.join(modpath)
def _toload_info(path, extrapath, _toload=None):
"""Return a dictionary of <modname>: <modpath> and an ordered list of
(file, module name) to load
"""
if _toload is None:
assert isinstance(path, list)
_toload = {}, []
for fileordir in path:
if isdir(fileordir) and exists(join(fileordir, '__init__.py')):
subfiles = [join(fileordir, fname) for fname in listdir(fileordir)]
_toload_info(subfiles, extrapath, _toload)
elif fileordir[-3:] == '.py':
modname = _modname_from_path(fileordir, extrapath)
_toload[0][modname] = fileordir
_toload[1].append((fileordir, modname))
return _toload
class RegistrableObject(object):
"""This is the base class for registrable objects which are selected
according to a context.
:attr:`__registry__`
name of the registry for this object (string like 'views',
'templates'...). You may want to define `__registries__` directly if your
object should be registered in several registries.
:attr:`__regid__`
object's identifier in the registry (string like 'main',
'primary', 'folder_box')
:attr:`__select__`
the class' selector
Moreover, the `__abstract__` attribute may be set to True to indicate that a
class is abstract and should not be registered.
You don't have to inherit from this class to put it in a registry (having
`__regid__` and `__select__` is enough), though this is needed for classes
that should be automatically registered.
"""
__registry__ = None
__regid__ = None
__select__ = None
__abstract__ = True # see doc snippets below (in Registry class)
@classproperty
def __registries__(cls):
if cls.__registry__ is None:
return ()
return (cls.__registry__,)
class RegistrableInstance(RegistrableObject):
"""Inherit this class if you want instances of the classes to be
automatically registered.
"""
def __new__(cls, *args, **kwargs):
"""Add a __module__ attribute telling the module where the instance was
created, for automatic registration.
"""
obj = super(RegistrableInstance, cls).__new__(cls)
# XXX subclasses must not override __new__
filepath = tb.extract_stack(limit=2)[0][0]
obj.__module__ = _modname_from_path(filepath)
return obj
class Registry(dict):
"""The registry store a set of implementations associated to identifier:
* to each identifier are associated a list of implementations
* to select an implementation of a given identifier, you should use one of the
:meth:`select` or :meth:`select_or_none` method
* to select a list of implementations for a context, you should use the
:meth:`possible_objects` method
* dictionary like access to an identifier will return the bare list of
implementations for this identifier.
To be usable in a registry, the only requirement is to have a `__select__`
attribute.
At the end of the registration process, the :meth:`__registered__`
method is called on each registered object that defines it, with the
registry in which it's registered as argument.
Registration methods:
.. automethod: register
.. automethod: unregister
Selection methods:
.. automethod: select
.. automethod: select_or_none
.. automethod: possible_objects
.. automethod: object_by_id
"""
def __init__(self, debugmode):
super(Registry, self).__init__()
self.debugmode = debugmode
def __getitem__(self, name):
"""return the registry (list of implementation objects) associated to
this name
"""
try:
return super(Registry, self).__getitem__(name)
except KeyError:
raise ObjectNotFound(name), None, sys.exc_info()[-1]
@classmethod
def objid(cls, obj):
"""returns a unique identifier for an object stored in the registry"""
return '%s.%s' % (obj.__module__, cls.objname(obj))
@classmethod
def objname(cls, obj):
"""returns a readable name for an object stored in the registry"""
return getattr(obj, '__name__', id(obj))
def initialization_completed(self):
"""call method __registered__() on registered objects when the callback
is defined"""
for objects in self.itervalues():
for objectcls in objects:
registered = getattr(objectcls, '__registered__', None)
if registered:
registered(self)
if self.debugmode:
wrap_predicates(_lltrace)
def register(self, obj, oid=None, clear=False):
"""base method to add an object in the registry"""
assert not '__abstract__' in obj.__dict__, obj
assert obj.__select__, obj
oid = oid or obj.__regid__
assert oid, ('no explicit name supplied to register object %s, '
'which has no __regid__ set' % obj)
if clear:
objects = self[oid] = []
else:
objects = self.setdefault(oid, [])
assert not obj in objects, 'object %s is already registered' % obj
objects.append(obj)
def register_and_replace(self, obj, replaced):
"""remove <replaced> and register <obj>"""
# XXXFIXME this is a duplication of unregister()
# remove register_and_replace in favor of unregister + register
# or simplify by calling unregister then register here
if not isinstance(replaced, basestring):
replaced = self.objid(replaced)
# prevent misspelling
assert obj is not replaced, 'replacing an object by itself: %s' % obj
registered_objs = self.get(obj.__regid__, ())
for index, registered in enumerate(registered_objs):
if self.objid(registered) == replaced:
del registered_objs[index]
break
else:
self.warning('trying to replace %s that is not registered with %s',
replaced, obj)
self.register(obj)
def unregister(self, obj):
"""remove object <obj> from this registry"""
objid = self.objid(obj)
oid = obj.__regid__
for registered in self.get(oid, ()):
# use self.objid() to compare objects because vreg will probably
# have its own version of the object, loaded through execfile
if self.objid(registered) == objid:
self[oid].remove(registered)
break
else:
self.warning('can\'t remove %s, no id %s in the registry',
objid, oid)
def all_objects(self):
"""return a list containing all objects in this registry.
"""
result = []
for objs in self.values():
result += objs
return result
# dynamic selection methods ################################################
def object_by_id(self, oid, *args, **kwargs):
"""return object with the `oid` identifier. Only one object is expected
to be found.
raise :exc:`ObjectNotFound` if there is no object with id `oid` in this
registry
raise :exc:`AssertionError` if there is more than one such object
"""
objects = self[oid]
assert len(objects) == 1, objects
return objects[0](*args, **kwargs)
def select(self, __oid, *args, **kwargs):
"""return the most specific object among those with the given oid
according to the given context.
raise :exc:`ObjectNotFound` if there is no object with id `oid` in this
registry
raise :exc:`NoSelectableObject` if no object can be selected
"""
obj = self._select_best(self[__oid], *args, **kwargs)
if obj is None:
raise NoSelectableObject(args, kwargs, self[__oid])
return obj
def select_or_none(self, __oid, *args, **kwargs):
"""return the most specific object among those with the given oid
according to the given context, or None if no object applies.
"""
try:
return self._select_best(self[__oid], *args, **kwargs)
except ObjectNotFound:
return None
def possible_objects(self, *args, **kwargs):
"""return an iterator on possible objects in this registry for the given
context
"""
for objects in self.itervalues():
obj = self._select_best(objects, *args, **kwargs)
if obj is None:
continue
yield obj
def _select_best(self, objects, *args, **kwargs):
"""return an instance of the most specific object according
to parameters
return None if no object applies (don't raise `NoSelectableObject` since
that would be costly when searching objects using `possible_objects`,
e.g. when searching for hooks).
"""
score, winners = 0, None
for obj in objects:
objectscore = obj.__select__(obj, *args, **kwargs)
if objectscore > score:
score, winners = objectscore, [obj]
elif objectscore > 0 and objectscore == score:
winners.append(obj)
if winners is None:
return None
if len(winners) > 1:
# log in production environment / test, raise while debugging
msg = 'select ambiguity: %s\n(args: %s, kwargs: %s)'
if self.debugmode:
# raise bare exception in debug mode
raise Exception(msg % (winners, args, kwargs.keys()))
self.error(msg, winners, args, kwargs.keys())
# return the result of calling the object
return self.selected(winners[0], args, kwargs)
def selected(self, winner, args, kwargs):
"""override here if for instance you don't want "instanciation"
"""
return winner(*args, **kwargs)
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
def obj_registries(cls, registryname=None):
"""return a tuple of registry names (see __registries__)"""
if registryname:
return (registryname,)
return cls.__registries__
class RegistryStore(dict):
"""This class is responsible for loading objects and storing them
in their registry which is created on the fly as needed.
It handles dynamic registration of objects and provides a
convenient api to access them. To be recognized as an object that
should be stored into one of the store's registries
(:class:`Registry`), an object must provide the following
attributes, used to control how they interact with the registry:
:attr:`__registries__`
list of registry names (string like 'views', 'templates'...) into which
the object should be registered
:attr:`__regid__`
object identifier in the registry (string like 'main',
'primary', 'folder_box')
:attr:`__select__`
the object predicate selectors
Moreover, the :attr:`__abstract__` attribute may be set to `True`
to indicate that an object is abstract and should not be registered
(inherited values of this attribute are not considered).
.. Note::
When using the store to load objects dynamically, you *always* have
to use **super()** to get the methods and attributes of the
superclasses, and not use the class identifier. If not, you'll get into
trouble at reload time.
For example, instead of writing::
class Thing(Parent):
__regid__ = 'athing'
__select__ = yes()
def f(self, arg1):
Parent.f(self, arg1)
You must write::
class Thing(Parent):
__regid__ = 'athing'
__select__ = yes()
def f(self, arg1):
super(Thing, self).f(arg1)
Controlling object registration
-------------------------------
Dynamic loading is triggered by calling the
:meth:`register_objects` method, given a list of directories to
inspect for python modules.
.. automethod: register_objects
For each module, by default, all compatible objects are registered
automatically. However, if some objects come as replacements of
other objects, or have to be included only if some condition is
met, you'll have to define a `registration_callback(vreg)`
function in the module and explicitly register **all objects** in
this module, using the api defined below.
.. automethod:: RegistryStore.register_all
.. automethod:: RegistryStore.register_and_replace
.. automethod:: RegistryStore.register
.. automethod:: RegistryStore.unregister
.. Note::
Once the function `registration_callback(vreg)` is implemented in a
module, all the objects from this module have to be explicitly
registered as it disables the automatic object registration.
Examples:
.. sourcecode:: python
def registration_callback(store):
# register everything in the module except BabarClass
store.register_all(globals().values(), __name__, (BabarClass,))
# conditionally register BabarClass
if 'babar_relation' in store.schema:
store.register(BabarClass)
In this example, we register all application object classes defined in the module
except `BabarClass`. This class is then registered only if the 'babar_relation'
relation type is defined in the instance schema.
.. sourcecode:: python
def registration_callback(store):
store.register(Elephant)
# replace Babar by Celeste
store.register_and_replace(Celeste, Babar)
In this example, we explicitly register classes one by one:
* the `Elephant` class
* the `Celeste` to replace `Babar`
If at some point we register a new appobject class in this module, it won't be
registered at all without modification to the `registration_callback`
implementation. The first example will register it though, thanks to the call
to the `register_all` method.
Controlling registry instantiation
----------------------------------
The `REGISTRY_FACTORY` class dictionary makes it possible to specify which
class should be instantiated for a given registry name. The class associated
with the `None` key is used when there is no specific class for a name (see
the sketch after this class).
"""
def __init__(self, debugmode=False):
super(RegistryStore, self).__init__()
self.debugmode = debugmode
def reset(self):
"""clear all registries managed by this store"""
# don't use self.clear, we want to keep existing subdictionaries
for subdict in self.itervalues():
subdict.clear()
self._lastmodifs = {}
def __getitem__(self, name):
"""return the registry (dictionary of class objects) associated to
this name
"""
try:
return super(RegistryStore, self).__getitem__(name)
except KeyError:
raise RegistryNotFound(name), None, sys.exc_info()[-1]
# methods for explicit (un)registration ###################################
# default class, when no specific class set
REGISTRY_FACTORY = {None: Registry}
def registry_class(self, regid):
"""return existing registry named regid or use factory to create one and
return it"""
try:
return self.REGISTRY_FACTORY[regid]
except KeyError:
return self.REGISTRY_FACTORY[None]
def setdefault(self, regid):
try:
return self[regid]
except RegistryNotFound:
self[regid] = self.registry_class(regid)(self.debugmode)
return self[regid]
def register_all(self, objects, modname, butclasses=()):
"""register registrable objects into `objects`.
Registrable objects are properly configured subclasses of
:class:`RegistrableObject`. Objects which are not defined in the module
`modname` or which are in `butclasses` won't be registered.
Typical usage is:
.. sourcecode:: python
store.register_all(globals().values(), __name__, (ClassIWantToRegisterExplicitly,))
So you get partially automatic registration, keeping manual registration
for some objects (to use
:meth:`~logilab.common.registry.RegistryStore.register_and_replace` for
instance).
"""
assert isinstance(modname, basestring), \
'modname expected to be a module name (ie string), got %r' % modname
for obj in objects:
if self.is_registrable(obj) and obj.__module__ == modname and not obj in butclasses:
if isinstance(obj, type):
self._load_ancestors_then_object(modname, obj, butclasses)
else:
self.register(obj)
def register(self, obj, registryname=None, oid=None, clear=False):
"""register `obj` implementation into `registryname` or
`obj.__registries__` if not specified, with identifier `oid` or
`obj.__regid__` if not specified.
If `clear` is true, all objects with the same identifier will be
unregistered first.
"""
assert not obj.__dict__.get('__abstract__'), obj
for registryname in obj_registries(obj, registryname):
registry = self.setdefault(registryname)
registry.register(obj, oid=oid, clear=clear)
self.debug("register %s in %s['%s']",
registry.objname(obj), registryname, oid or obj.__regid__)
self._loadedmods.setdefault(obj.__module__, {})[registry.objid(obj)] = obj
def unregister(self, obj, registryname=None):
"""unregister `obj` object from the registry `registryname` or
`obj.__registries__` if not specified.
"""
for registryname in obj_registries(obj, registryname):
registry = self[registryname]
registry.unregister(obj)
self.debug("unregister %s from %s['%s']",
registry.objname(obj), registryname, obj.__regid__)
def register_and_replace(self, obj, replaced, registryname=None):
"""register `obj` object into `registryname` or
`obj.__registries__` if not specified. If found, the `replaced` object
will be unregistered first (else a warning will be issued as it is
generally unexpected).
"""
for registryname in obj_registries(obj, registryname):
registry = self[registryname]
registry.register_and_replace(obj, replaced)
self.debug("register %s in %s['%s'] instead of %s",
registry.objname(obj), registryname, obj.__regid__,
registry.objname(replaced))
# initialization methods ###################################################
def init_registration(self, path, extrapath=None):
"""reset registry and walk down path to return list of (path, name)
file modules to be loaded"""
# XXX make this private by renaming it to _init_registration ?
self.reset()
# compute list of all modules that have to be loaded
self._toloadmods, filemods = _toload_info(path, extrapath)
# XXX is _loadedmods still necessary ? It seems like it's useful
# to avoid loading same module twice, especially with the
# _load_ancestors_then_object logic but this needs to be checked
self._loadedmods = {}
return filemods
def register_objects(self, path, extrapath=None):
"""register all objects found walking down <path>"""
# load views from each directory in the instance's path
# XXX inline init_registration ?
filemods = self.init_registration(path, extrapath)
for filepath, modname in filemods:
self.load_file(filepath, modname)
self.initialization_completed()
def initialization_completed(self):
"""call initialization_completed() on all known registries"""
for reg in self.itervalues():
reg.initialization_completed()
def _mdate(self, filepath):
""" return the modification date of a file path """
try:
return stat(filepath)[-2]
except OSError:
# this typically happens on emacs backup files (.#foo.py)
self.warning('Unable to load %s. It is likely to be a backup file',
filepath)
return None
def is_reload_needed(self, path):
"""return True if something module changed and the registry should be
reloaded
"""
lastmodifs = self._lastmodifs
for fileordir in path:
if isdir(fileordir) and exists(join(fileordir, '__init__.py')):
if self.is_reload_needed([join(fileordir, fname)
for fname in listdir(fileordir)]):
return True
elif fileordir[-3:] == '.py':
mdate = self._mdate(fileordir)
if mdate is None:
continue # backup file, see _mdate implementation
elif "flymake" in fileordir:
# flymake + pylint in use, don't consider these; they would corrupt the registry
continue
if fileordir not in lastmodifs or lastmodifs[fileordir] < mdate:
self.info('File %s changed since last visit', fileordir)
return True
return False
def load_file(self, filepath, modname):
""" load registrable objects (if any) from a python file """
from logilab.common.modutils import load_module_from_name
if modname in self._loadedmods:
return
self._loadedmods[modname] = {}
mdate = self._mdate(filepath)
if mdate is None:
return # backup file, see _mdate implementation
elif "flymake" in filepath:
# flymake + pylint in use, don't consider these; they would corrupt the registry
return
# set update time before module loading, else we get some reloading
# weirdness in case of syntax error or other error while importing the
# module
self._lastmodifs[filepath] = mdate
# load the module
module = load_module_from_name(modname)
self.load_module(module)
def load_module(self, module):
"""Automatically handle module objects registration.
Instances are registered as soon as they are hashable and have the
following attributes:
* __regid__ (a string)
* __select__ (a callable)
* __registries__ (a tuple/list of string)
For classes this is a bit more complicated:
- first ensure parent classes are already registered
- classes with __abstract__ == True in their local dictionary are skipped
- an object class needs to have its registries and identifier properly set to a
non-empty string to be registered.
"""
self.info('loading %s from %s', module.__name__, module.__file__)
if hasattr(module, 'registration_callback'):
module.registration_callback(self)
else:
self.register_all(vars(module).itervalues(), module.__name__)
def _load_ancestors_then_object(self, modname, objectcls, butclasses=()):
"""handle class registration according to rules defined in
:meth:`load_module`
"""
# backward compat, we used to allow anything other than classes
if not isinstance(objectcls, type):
if self.is_registrable(objectcls) and objectcls.__module__ == modname:
self.register(objectcls)
return
# imported classes
objmodname = objectcls.__module__
if objmodname != modname:
# The module of the object is not the same as the currently
# worked on module, or this is actually an instance, which
# has no module at all
if objmodname in self._toloadmods:
# if this is still scheduled for loading, let's proceed immediately,
# but using the object module
self.load_file(self._toloadmods[objmodname], objmodname)
return
# ensure object hasn't been already processed
clsid = '%s.%s' % (modname, objectcls.__name__)
if clsid in self._loadedmods[modname]:
return
self._loadedmods[modname][clsid] = objectcls
# ensure ancestors are registered
for parent in objectcls.__bases__:
self._load_ancestors_then_object(modname, parent, butclasses)
# ensure object is registrable
if objectcls in butclasses or not self.is_registrable(objectcls):
return
# backward compat
reg = self.setdefault(obj_registries(objectcls)[0])
if reg.objname(objectcls)[0] == '_':
warn("[lgc 0.59] object whose name start with '_' won't be "
"skipped anymore at some point, use __abstract__ = True "
"instead (%s)" % objectcls, DeprecationWarning)
return
# register, finally
self.register(objectcls)
@classmethod
def is_registrable(cls, obj):
"""ensure `obj` should be registered
as arbitrary stuff may be registered, do a lot of checks and warn about
weird cases (think of dumb proxy objects)
"""
if isinstance(obj, type):
if not issubclass(obj, RegistrableObject):
# ducktyping backward compat
if not (getattr(obj, '__registries__', None)
and getattr(obj, '__regid__', None)
and getattr(obj, '__select__', None)):
return False
elif issubclass(obj, RegistrableInstance):
return False
elif not isinstance(obj, RegistrableInstance):
return False
if not obj.__regid__:
return False # no regid
registries = obj.__registries__
if not registries:
return False # no registries
selector = obj.__select__
if not selector:
return False # no selector
if obj.__dict__.get('__abstract__', False):
return False
# then detect potential problems that should be warned about
if not isinstance(registries, (tuple, list)):
cls.warning('%s has __registries__ which is not a list or tuple', obj)
return False
if not callable(selector):
cls.warning('%s has a non-callable __select__', obj)
return False
return True
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
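# A minimal sketch of the REGISTRY_FACTORY mechanism documented in the
# RegistryStore docstring above; `_LoggingRegistry` and the 'views' key are
# assumptions made up for this example, not part of the module's API.
class _LoggingRegistry(Registry):
    """Sketch: a Registry variant logging every selection."""
    def selected(self, winner, args, kwargs):
        # log which implementation won before instantiating it
        self.info('selected %s', self.objname(winner))
        return super(_LoggingRegistry, self).selected(winner, args, kwargs)

class _ExampleStore(RegistryStore):
    # 'views' registries become _LoggingRegistry instances; every other
    # name falls back to the plain Registry associated with the None key
    REGISTRY_FACTORY = {None: Registry, 'views': _LoggingRegistry}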
# init logging
set_log_methods(RegistryStore, getLogger('registry.store'))
set_log_methods(Registry, getLogger('registry'))
# helpers for debugging selectors
TRACED_OIDS = None
def _trace_selector(cls, selector, args, ret):
vobj = args[0]
if TRACED_OIDS == 'all' or vobj.__regid__ in TRACED_OIDS:
print '%s -> %s for %s(%s)' % (cls, ret, vobj, vobj.__regid__)
def _lltrace(selector):
"""use this decorator on your predicates so they become traceable with
:class:`traced_selection`
"""
def traced(cls, *args, **kwargs):
ret = selector(cls, *args, **kwargs)
if TRACED_OIDS is not None:
_trace_selector(cls, selector, args, ret)
return ret
traced.__name__ = selector.__name__
traced.__doc__ = selector.__doc__
return traced
class traced_selection(object): # pylint: disable=C0103
"""
Typical usage is:
.. sourcecode:: python
>>> from logilab.common.registry import traced_selection
>>> with traced_selection():
... # some code in which you want to debug selectors
... # for all objects
Don't forget the 'from __future__ import with_statement' at the module top-level
if you're using python prior to 2.6.
This will yield lines like this in the logs::
selector one_line_rset returned 0 for <class 'elephant.Babar'>
You can also give :class:`traced_selection` the identifiers of the objects on
which you want to debug selection ('regid1' and 'regid2' in the example below).
.. sourcecode:: python
>>> with traced_selection( ('regid1', 'regid2') ):
... # some code in which you want to debug selectors
... # for objects with __regid__ 'regid1' and 'regid2'
A potentially useful point to set up such a tracing function is
the `logilab.common.registry.Registry.select` method body.
"""
def __init__(self, traced='all'):
self.traced = traced
def __enter__(self):
global TRACED_OIDS
TRACED_OIDS = self.traced
def __exit__(self, exctype, exc, traceback):
global TRACED_OIDS
TRACED_OIDS = None
return traceback is None
# selector base classes and operations ########################################
def objectify_predicate(selector_func):
"""Most of the time, a simple score function is enough to build a selector.
The :func:`objectify_predicate` decorator turns it into a proper selector
class::
@objectify_predicate
def one(cls, req, rset=None, **kwargs):
return 1
class MyView(View):
__select__ = View.__select__ & one()
"""
return type(selector_func.__name__, (Predicate,),
{'__doc__': selector_func.__doc__,
'__call__': lambda self, *a, **kw: selector_func(*a, **kw)})
_PREDICATES = {}
def wrap_predicates(decorator):
for predicate in _PREDICATES.itervalues():
if not '_decorators' in predicate.__dict__:
predicate._decorators = set()
if decorator in predicate._decorators:
continue
predicate._decorators.add(decorator)
predicate.__call__ = decorator(predicate.__call__)
class PredicateMetaClass(type):
def __new__(cls, *args, **kwargs):
# use __new__ so subclasses don't have to call Predicate.__init__
inst = type.__new__(cls, *args, **kwargs)
proxy = weakref.proxy(inst, lambda p: _PREDICATES.pop(id(p)))
_PREDICATES[id(proxy)] = proxy
return inst
class Predicate(object):
"""base class for selector classes providing implementation
for operators ``&``, ``|`` and ``~``
This class is only here to give access to binary operators, the selector
logic itself should be implemented in the :meth:`__call__` method. Notice it
should usually accept any arbitrary arguments (the context), though that may
vary depending on your usage of the registry.
a selector is called to help choose the correct object for a
particular context by returning a score (`int`) telling how well
the implementation given as first argument fits the given context.
A score of 0 means that the class doesn't apply.
"""
__metaclass__ = PredicateMetaClass
@property
def func_name(self):
# backward compatibility
return self.__class__.__name__
def search_selector(self, selector):
"""search for the given selector, selector instance or tuple of
selectors in the selectors tree. Return None if not found.
"""
if self is selector:
return self
if (isinstance(selector, type) or isinstance(selector, tuple)) and \
isinstance(self, selector):
return self
return None
def __str__(self):
return self.__class__.__name__
def __and__(self, other):
return AndPredicate(self, other)
def __rand__(self, other):
return AndPredicate(other, self)
def __iand__(self, other):
return AndPredicate(self, other)
def __or__(self, other):
return OrPredicate(self, other)
def __ror__(self, other):
return OrPredicate(other, self)
def __ior__(self, other):
return OrPredicate(self, other)
def __invert__(self):
return NotPredicate(self)
# XXX (function | function) or (function & function) not managed yet
def __call__(self, cls, *args, **kwargs):
return NotImplementedError("selector %s must implement its logic "
"in its __call__ method" % self.__class__)
def __repr__(self):
return u'<Predicate %s at %x>' % (self.__class__.__name__, id(self))
class MultiPredicate(Predicate):
"""base class for compound selector classes"""
def __init__(self, *selectors):
self.selectors = self.merge_selectors(selectors)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__,
','.join(str(s) for s in self.selectors))
@classmethod
def merge_selectors(cls, selectors):
"""deal with selector instanciation when necessary and merge
multi-selectors if possible:
AndPredicate(AndPredicate(sel1, sel2), AndPredicate(sel3, sel4))
==> AndPredicate(sel1, sel2, sel3, sel4)
"""
merged_selectors = []
for selector in selectors:
# XXX do we really want magic-transformations below?
# if so, wanna warn about them?
if isinstance(selector, types.FunctionType):
selector = objectify_predicate(selector)()
if isinstance(selector, type) and issubclass(selector, Predicate):
selector = selector()
assert isinstance(selector, Predicate), selector
if isinstance(selector, cls):
merged_selectors += selector.selectors
else:
merged_selectors.append(selector)
return merged_selectors
def search_selector(self, selector):
"""search for the given selector or selector instance (or tuple of
selectors) in the selectors tree. Return None if not found
"""
for childselector in self.selectors:
if childselector is selector:
return childselector
found = childselector.search_selector(selector)
if found is not None:
return found
# if not found in children, maybe we are looking for self?
return super(MultiPredicate, self).search_selector(selector)
class AndPredicate(MultiPredicate):
"""and-chained selectors"""
def __call__(self, cls, *args, **kwargs):
score = 0
for selector in self.selectors:
partscore = selector(cls, *args, **kwargs)
if not partscore:
return 0
score += partscore
return score
class OrPredicate(MultiPredicate):
"""or-chained selectors"""
def __call__(self, cls, *args, **kwargs):
for selector in self.selectors:
partscore = selector(cls, *args, **kwargs)
if partscore:
return partscore
return 0
class NotPredicate(Predicate):
"""negation selector"""
def __init__(self, selector):
self.selector = selector
def __call__(self, cls, *args, **kwargs):
score = self.selector(cls, *args, **kwargs)
return int(not score)
def __str__(self):
return 'NOT(%s)' % self.selector
class yes(Predicate): # pylint: disable=C0103
"""Return the score given as parameter, with a default score of 0.5 so any
other selector take precedence.
Usually used for objects which can be selected whatever the context, or
also sometimes to add arbitrary points to a score.
Take care, `yes(0)` could be named 'no'...
"""
def __init__(self, score=0.5):
self.score = score
def __call__(self, *args, **kwargs):
return self.score
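# A minimal sketch of predicate combination with the `&`, `|` and `~`
# operators described in the Predicate docstring above; `_positive_score`
# is an assumption made up for this example, not part of the module's API.
@objectify_predicate
def _positive_score(cls, score=0, **kwargs):
    """Sketch predicate: applies only when the context carries a positive score."""
    return 1 if score > 0 else 0

# AndPredicate sums the part scores (0 if any part is 0), OrPredicate
# returns the first non-zero part score, NotPredicate maps 0 to 1:
_example_selector = (_positive_score() & yes()) | ~_positive_score()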
# deprecated stuff #############################################################
from logilab.common.deprecation import deprecated
@deprecated('[lgc 0.59] use Registry.objid class method instead')
def classid(cls):
return '%s.%s' % (cls.__module__, cls.__name__)
@deprecated('[lgc 0.59] use obj_registries function instead')
def class_registries(cls, registryname):
return obj_registries(cls, registryname)
|
hpfem/agros2d
|
resources/python/logilab/common/registry.py
|
Python
|
gpl-2.0
| 41,377
|
__author__ = 'lenovo'
# -*- coding: utf-8 -*-
DOMAIN = u'http://rs.xidian.edu.cn/'
USERNAME = u'***'
PASSWORD = u'***'
LOGINFIELD = u'username'
COOKIETIME = 2592000
HOMEURL = DOMAIN + u'forum.php'
LOGINURL = DOMAIN + u'member.php?mod=logging&action=login&loginsubmit=yes&handlekey=login&loginhash=LCaB3&inajax=1'
|
pang1567/rsmovie
|
src/config.py
|
Python
|
gpl-2.0
| 315
|
'''
Using the Python language, have the function AdditivePersistence(num) take the num parameter being passed
which will always be a positive integer and return its additive persistence
which is the number of times you must add the digits in num until you reach a single digit.
For example: if num is 2718 then your program should return 2 because 2 + 7 + 1 + 8 = 18 and 1 + 8 = 9 and you stop at 9.
'''
def AdditivePersistence(num):
steps = 0
while num > 9:
snum = str(num)
sdigits = list(snum)
digits = [int(x) for x in sdigits]
num = sum(digits)
steps = steps + 1
return steps
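# A minimal arithmetic variant of the same algorithm (summing digits with
# divmod instead of a string round-trip), added only as an illustrative
# sketch alongside the original solution:
def AdditivePersistenceArith(num):
    steps = 0
    while num > 9:
        total = 0
        while num:
            num, digit = divmod(num, 10)  # peel off the last digit
            total += digit
        num = total
        steps += 1
    return steps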
# keep this function call here
# to see how to enter arguments in Python scroll down
print AdditivePersistence(int(raw_input()))
|
anomen-s/programming-challenges
|
coderbyte.com/easy/Additive Persistence/solve.py
|
Python
|
gpl-2.0
| 777
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
bibauthorid_dbinterface
This is the only file in bibauthorid which should
use the database. It provides an interface for
all other files in the module.
'''
import bibauthorid_config as bconfig
import sys
import numpy
import cPickle
import zlib
from itertools import groupby, count, ifilter, chain, imap
from operator import itemgetter
from invenio.access_control_engine import acc_authorize_action
from bibauthorid_name_utils import split_name_parts
from bibauthorid_name_utils import create_canonical_name
from bibauthorid_name_utils import create_normalized_name
from bibauthorid_general_utils import bibauthor_print
from bibauthorid_general_utils import update_status \
, update_status_final
from dbquery import run_sql \
, OperationalError \
, ProgrammingError
def get_sql_time():
'''
Returns the time according to the database. The type is datetime.datetime.
'''
return run_sql("select now()")[0][0]
def set_personid_row(person_id, tag, value, opt1=0, opt2=0, opt3=""):
'''
Inserts data and additional options for a person, given a personid and a tag.
'''
run_sql("INSERT INTO aidPERSONIDDATA "
"(`personid`, `tag`, `data`, `opt1`, `opt2`, `opt3`) "
"VALUES (%s, %s, %s, %s, %s, %s)",
(person_id, tag, value, opt1, opt2, opt3))
def get_personid_row(person_id, tag):
'''
Returns all the records associated with a person and a tag.
@param person_id: id of the person to read the attribute from
@type person_id: int
@param tag: the tag to read.
@type tag: string
@return: the data associated with a virtual author
@rtype: tuple of tuples
'''
return run_sql("SELECT data, opt1, opt2, opt3 "
"data FROM aidPERSONIDDATA "
"WHERE personid = %s AND tag = %s",
(person_id, tag))
def del_personid_row(tag, person_id=None, value=None):
'''
Delete the rows associated with the given tag, optionally restricted to a
certain person and/or value.
@param person_id: ID of the person
@type person_id: int
@param tag: tag to be deleted
@type tag: string
@param value: value to restrict the deletion to
@type value: string
'''
if person_id:
if value:
run_sql("delete from aidPERSONIDDATA where personid=%s and tag=%s and data=%s", (person_id, tag, value,))
else:
run_sql("delete from aidPERSONIDDATA where personid=%s and tag=%s", (person_id, tag,))
else:
if value:
run_sql("delete from aidPERSONIDDATA where tag=%s and data=%s", (tag, value,))
else:
run_sql("delete from aidPERSONIDDATA where tag=%s", (tag,))
def get_all_papers_of_pids(personid_list):
'''
Get all papers of the authors in a given list and sort the results
by bibrefrec.
@param personid_list: list with the authors.
@type personid_list: iterable of integers.
'''
if personid_list:
plist = list_2_SQL_str(personid_list, lambda x: str(x))
paps = run_sql("select personid, bibref_table, bibref_value, bibrec, flag "
"from aidPERSONIDPAPERS "
"where personid in %s "
% plist)
inner = set(row[1:4] for row in paps if row[4] > -2)
return (x for x in paps if x[1:4] in inner)
return ()
def del_person_not_manually_claimed_papers(pid):
'''
Deletes papers from a person which have not been manually claimed.
'''
run_sql("delete from aidPERSONIDPAPERS "
"where and (flag <> '-2' and flag <> '2') and personid=%s", (pid,))
def get_personid_from_uid(uid):
'''
Returns the personID associated with the provided uid.
The second element of the returned couple is True if the personID is already associated with the user, False otherwise.
@param uid: userID
@type uid: ((int,),)
'''
pid = run_sql("select personid from aidPERSONIDDATA where tag=%s and data=%s", ('uid', str(uid[0][0])))
if len(pid) == 1:
return (pid[0], True)
else:
return ([-1], False)
def get_uid_from_personid(pid):
uid = run_sql("select data from aidPERSONIDDATA where tag='uid' and personid = %s", (pid,))
if uid:
return uid[0][0]
else:
return None
def get_new_personid():
pids = (run_sql("select max(personid) from aidPERSONIDDATA")[0][0],
run_sql("select max(personid) from aidPERSONIDPAPERS")[0][0])
pids = tuple(int(p) for p in pids if p is not None)
if len(pids) == 2:
return max(*pids) + 1
elif len(pids) == 1:
return pids[0] + 1
else:
return 0
def get_existing_personids():
try:
pids_data = set(zip(*run_sql("select distinct personid from aidPERSONIDDATA"))[0])
except IndexError:
pids_data = set()
try:
pids_pap = set(zip(*run_sql("select distinct personid from aidPERSONIDPAPERS"))[0])
except IndexError:
pids_pap = set()
return pids_data | pids_pap
def get_existing_result_clusters():
return run_sql("select distinct personid from aidRESULTS")
def create_new_person(uid= -1, uid_is_owner=False):
'''
Create a new person. Set the uid as owner if requested.
'''
pid = get_new_personid()
if uid_is_owner:
set_personid_row(pid, 'uid', str(uid))
else:
set_personid_row(pid, 'user-created', str(uid))
return pid
def create_new_person_from_uid(uid):
return create_new_person(uid, uid_is_owner=True)
def new_person_from_signature(sig, name=None):
'''
Creates a new person from a signature.
'''
pid = get_new_personid()
add_signature(sig, name, pid)
return pid
def add_signature(sig, name, pid):
'''
Inserts a signature in personid.
'''
if not name:
name = get_name_by_bibrecref(sig)
name = create_normalized_name(split_name_parts(name))
run_sql("INSERT INTO aidPERSONIDPAPERS "
"(personid, bibref_table, bibref_value, bibrec, name) "
"VALUES (%s, %s, %s, %s, %s)"
, (pid, str(sig[0]), sig[1], sig[2], name))
def move_signature(sig, pid):
'''
Moves a signature to the given person, unless it was manually (un)claimed.
'''
run_sql("update aidPERSONIDPAPERS set personid=%s "
"where bibref_table=%s and bibref_value=%s "
"and bibrec=%s and flag <> 2 and flag <> -2",
(pid,) + sig)
def find_conflicts(sig, pid):
"""
"""
return run_sql("select bibref_table, bibref_value, bibrec, flag "
"from aidPERSONIDPAPERS where "
"personid = %s and "
"bibrec = %s and "
"flag <> -2"
, (pid, sig[2]))
def update_request_ticket(person_id, tag_data_tuple, ticket_id=None):
'''
Creates / updates a request ticket for a personID
@param: personid int
@param: tag_data_tuples 'image' of the ticket: (('paper', '700:316,10'), ('owner', 'admin'), ('external_id', 'ticket_18'))
@return: ticketid
'''
#tags: rt_owner (the owner of the ticket, associating the rt_number to the transaction)
# rt_external_id
# rt_paper_confirm, rt_paper_reject, rt_paper_forget, rt_name, rt_email, rt_whatever
#flag: rt_number
if not ticket_id:
last_id = run_sql("select max(opt1) from aidPERSONIDDATA where personid=%s and tag like %s", (str(person_id), 'rt_%'))[0][0]
if last_id:
ticket_id = last_id + 1
else:
ticket_id = 1
else:
delete_request_ticket(person_id, ticket_id)
for d in tag_data_tuple:
run_sql("insert into aidPERSONIDDATA (personid, tag, data, opt1) "
"values (%s,%s,%s,%s)",
(str(person_id), 'rt_' + str(d[0]), str(d[1]), str(ticket_id)))
return ticket_id
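# Illustrative sketch only (person id and values are made up): a call
# showing the tag/data 'image' format documented above.
def _example_open_ticket():
    return update_request_ticket(42, (('paper', '700:316,10'),
                                      ('owner', 'admin'),
                                      ('external_id', 'ticket_18')))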
def delete_request_ticket(person_id, ticket_id=None):
'''
Removes a ticket from a person_id.
If ticket_id is not provided, removes all the tickets pending on the person.
'''
if ticket_id:
run_sql("delete from aidPERSONIDDATA where personid=%s and tag like %s and opt1 =%s", (str(person_id), 'rt_%', str(ticket_id)))
else:
run_sql("delete from aidPERSONIDDATA where personid=%s and tag like %s", (str(person_id), 'rt_%'))
def get_all_personids_by_name(regexpr):
return run_sql("select personid, name "
"from aidPERSONIDPAPERS "
"where name like %s",
(regexpr,))
def get_personids_by_canonical_name(target):
pid = run_sql("select personid from aidPERSONIDDATA where "
"tag='canonical_name' and data like %s", (target,))
if pid:
return run_sql("select personid, name from aidPERSONIDPAPERS "
"where personid=%s", (pid[0][0],))
else:
return []
def get_bibref_modification_status(bibref):
'''
Determines if a record attached to a person has been touched by a human
by checking the flag.
@param bibref: The paper identifier to be checked (e.g. "100:12,144")
@type bibref: string
returns [bool:human_modified, int:lcul]
'''
if not bibref:
raise ValueError("A bibref is expected!")
head, rec = bibref.split(',')
table, ref = head.split(':')
flags = run_sql("SELECT flag, lcul FROM aidPERSONIDPAPERS WHERE "
"bibref_table = %s and bibref_value = %s and bibrec = %s"
, (table, ref, rec))
if flags:
return flags[0]
else:
return (False, 0)
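# Illustrative helper, not part of the original interface: bibrefrec
# strings throughout this module follow the '<table>:<ref>,<bibrec>'
# convention parsed above.
def _split_bibrefrec(bibrefrec):
    '''Sketch: split e.g. "100:12,144" into ("100", 12, 144).'''
    head, rec = bibrefrec.split(',')
    table, ref = head.split(':')
    return (table, int(ref), int(rec))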
def get_canonical_id_from_personid(pid):
'''
Finds the canonical name of a person id (e.g. Ellis_J_R_1)
@param pid
@type int
@return: sql result of the request
@rtype: tuple of tuple
'''
return run_sql("SELECT data FROM aidPERSONIDDATA WHERE "
"tag = %s AND personid = %s", ('canonical_name', str(pid)))
def get_papers_status(paper):
'''
Gets the personID and flag associated with papers
@param papers: list of papers
@type papers: '100:7531,9024'
@return: (('data','personID','flag',),)
@rtype: tuple of tuples
'''
head, bibrec = paper.split(',')
table, bibref = head.split(':')
rets = run_sql("select PersonID, flag "
"from aidPERSONIDPAPERS "
"where bibref_table = %s "
"and bibref_value = %s "
"and bibrec = %s",
(table, bibref, bibrec))
return [[paper] + list(x) for x in rets]
def get_persons_from_recids(recids, return_alt_names=False,
return_all_person_papers=False):
rec_2_pid = dict()
pid_2_data = dict()
all_pids = set()
def get_canonical_name(pid):
return run_sql("SELECT data "
"FROM aidPERSONIDDATA "
"WHERE tag = %s "
"AND personid = %s",
('canonical_name', pid))
for rec in recids:
pids = run_sql("SELECT personid "
"FROM aidPERSONIDPAPERS "
"WHERE bibrec = %s "
" and flag > -2 ",
(rec,))
# for some reason python's set is faster than a mysql distinct
pids = set(p[0] for p in pids)
all_pids |= pids
rec_2_pid[rec] = list(pids)
for pid in all_pids:
pid_data = {}
canonical = get_canonical_name(pid)
#We can suppose that this person didn't have a chance to get a canonical name yet
#because it was not fully processed by its creator. Anyway, it's safe to try to create one
#before failing miserably
if not canonical:
update_personID_canonical_names([pid])
canonical = get_canonical_name(pid)
#assert len(canonical) == 1
#This condition cannot hold in case claims or update daemons are run in parallel
#with this, as it can happen that a person with papers exists for which a canonical name
#has not been computed yet. Hence, it will be indexed next time, so it learns.
#Each person should have at most one canonical name, so:
assert len(canonical) <= 1
if len(canonical) == 1:
pid_data = {'canonical_id' : canonical[0][0]}
if return_alt_names:
names = run_sql("SELECT name "
"FROM aidPERSONIDPAPERS "
"WHERE personid = %s "
" and flag > -2 ",
(pid,))
names = set(n[0] for n in names)
pid_data['alternatative_names'] = list(names)
if return_all_person_papers:
recs = run_sql("SELECT bibrec "
"FROM aidPERSONIDPAPERS "
"WHERE personid = %s "
" and flag > -2 ",
(pid,))
recs = set(r[0] for r in recs)
pid_data['person_records'] = list(recs)
pid_2_data[pid] = pid_data
return (rec_2_pid, pid_2_data)
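# Illustrative sketch (record ids are made up): the couple returned above
# maps bibrec -> [personid] and personid -> person data.
def _example_persons_from_recids():
    rec_2_pid, pid_2_data = get_persons_from_recids(
        [1, 2], return_alt_names=True, return_all_person_papers=True)
    return rec_2_pid, pid_2_data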
def get_person_db_names_count(pid, sort_by_count=True):
'''
Returns the set of name strings and counts associated with a person id.
The name strings are as found in the database.
@param pid: ID of the person
@type pid: ('2',)
'''
id_2_count = run_sql("select bibref_table, bibref_value "
"from aidPERSONIDPAPERS "
"where personid = %s "
"and flag > -2", (pid,))
ref100 = [refid[1] for refid in id_2_count if refid[0] == '100']
ref700 = [refid[1] for refid in id_2_count if refid[0] == '700']
ref100_count = dict((key, len(list(data))) for key, data in groupby(sorted(ref100)))
ref700_count = dict((key, len(list(data))) for key, data in groupby(sorted(ref700)))
if ref100:
ref100_s = list_2_SQL_str(ref100, str)
id100_2_str = run_sql("select id, value "
"from bib10x "
"where id in %s"
% ref100_s)
else:
id100_2_str = tuple()
if ref700:
ref700_s = list_2_SQL_str(ref700, str)
id700_2_str = run_sql("select id, value "
"from bib70x "
"where id in %s"
% ref700_s)
else:
id700_2_str = tuple()
ret100 = [(name, ref100_count[refid]) for refid, name in id100_2_str]
ret700 = [(name, ref700_count[refid]) for refid, name in id700_2_str]
ret = ret100 + ret700
if sort_by_count:
ret = sorted(ret, key=itemgetter(1), reverse=True)
return ret
def get_person_id_from_canonical_id(canonical_id):
'''
Finds the person id from a canonical name (e.g. Ellis_J_R_1)
@param canonical_id: the canonical ID
@type canonical_id: string
@return: sql result of the request
@rtype: tuple of tuple
'''
return run_sql("SELECT personid FROM aidPERSONIDDATA WHERE "
"tag='canonical_name' AND data = %s", (canonical_id,))
def get_person_names_count(pid):
'''
Returns the set of name strings and counts associated with a person id
@param pid: ID of the person
@type pid: ('2',)
'''
return run_sql("select name, count(name) from aidPERSONIDPAPERS where "
"personid=%s and flag > -2 group by name", (pid,))
def get_person_db_names_set(pid):
'''
Returns the set of db_name strings associated with a person id
@param pid: ID of the person
@type pid: 2
'''
names = get_person_db_names_count(pid)
if names:
return zip(set(zip(*names)[0]))
else:
return []
def get_personids_from_bibrec(bibrec):
'''
Returns all the personids associated with a bibrec.
'''
pids = run_sql("select distinct personid from aidPERSONIDPAPERS where bibrec=%s and flag > -2", (bibrec,))
if pids:
return zip(*pids)[0]
else:
return []
def get_personids_and_papers_from_bibrecs(bibrecs, limit_by_name=None):
'''
Return (personid, set_of_bibrecs) couples for the given bibrecs, optionally
restricted to names matching the surname of `limit_by_name`, sorted by
decreasing number of papers.
'''
if not bibrecs:
return []
else:
bibrecs = list_2_SQL_str(bibrecs)
if limit_by_name:
try:
surname = split_name_parts(limit_by_name)[0]
except IndexError:
surname = None
else:
surname = None
if not surname:
data = run_sql("select personid,bibrec from aidPERSONIDPAPERS where bibrec in %s" % (bibrecs,))
else:
surname = split_name_parts(limit_by_name)[0]
data = run_sql(("select personid,bibrec from aidPERSONIDPAPERS where bibrec in %s "
"and name like " % bibrecs) + ' %s ', (surname + '%',))
pidlist = [(k, set([s[1] for s in d]))
for k, d in groupby(sorted(data, key=lambda x:x[0]), key=lambda x:x[0])]
pidlist = sorted(pidlist, key=lambda x:len(x[1]), reverse=True)
return pidlist
def get_person_bibrecs(pid):
'''
Returns bibrecs associated with a personid
@param pid: integer personid
@return [bibrec1,...,bibrecN]
'''
papers = run_sql("select bibrec from aidPERSONIDPAPERS where personid=%s", (str(pid),))
if papers:
return list(set(zip(*papers)[0]))
else:
return []
def get_person_papers(pid, flag,
show_author_name=False,
show_title=False,
show_rt_status=False,
show_affiliations=False,
show_date=False,
show_experiment=False):
query = "bibref_table, bibref_value, bibrec, flag"
if show_author_name:
query += ", name"
all_papers = run_sql("SELECT " + query + " "
"FROM aidPERSONIDPAPERS "
"WHERE personid = %s "
"AND flag >= %s",
(pid, flag))
def format_paper(paper):
bibrefrec = "%s:%d,%d" % paper[:3]
ret = {'data' : bibrefrec,
'flag' : paper[3]
}
if show_author_name:
ret['authorname'] = paper[4]
if show_title:
ret['title'] = ""
title = get_title_from_rec(paper[2])
if title:
ret['title'] = (title, )
if show_rt_status:
rt_count = run_sql("SELECT count(personid) "
"FROM aidPERSONIDDATA WHERE "
"tag like 'rt_%%' and data = %s"
, (bibrefrec,))
ret['rt_status'] = (rt_count[0][0] > 0)
if show_affiliations:
tag = '%s__u' % paper[0]
ret['affiliation'] = get_grouped_records(paper[:3], tag)[tag]
if show_date:
ret['date'] = []
date_id = run_sql("SELECT id_bibxxx "
"FROM bibrec_bib26x "
"WHERE id_bibrec = %s "
, (paper[2],))
if date_id:
date_id_s = list_2_SQL_str(date_id, lambda x: x[0])
date = run_sql("SELECT value "
"FROM bib26x "
"WHERE id in %s "
"AND tag = %s"
% (date_id_s, "'269__c'"))
if date:
ret['date'] = zip(*date)[0]
if show_experiment:
ret['experiment'] = []
experiment_id = run_sql("SELECT id_bibxxx "
"FROM bibrec_bib69x "
"WHERE id_bibrec = %s "
, (paper[2],))
if experiment_id:
experiment_id_s = list_2_SQL_str(experiment_id, lambda x: x[0])
experiment = run_sql("SELECT value "
"FROM bib69x "
"WHERE id in %s "
"AND tag = %s"
% (experiment_id_s, "'693__e'"))
if experiment:
ret['experiment'] = zip(*experiment)[0]
return ret
return [format_paper(paper) for paper in all_papers]
def get_persons_with_open_tickets_list():
'''
Finds all the persons with open tickets and returns pids and count of tickets
@return: [[pid, ticket_count]]
'''
return run_sql("select personid, count(distinct opt1) from "
"aidPERSONIDDATA where tag like 'rt_%' group by personid")
def get_request_ticket(person_id, ticket_id=None):
'''
Retrieves one or many request tickets from a person
@param: person_id: person id integer
@param: ticket_id: ticket id (flag) value
@returns: [[[('tag', 'value')], ticket_id]]
[[[('a', 'va'), ('b', 'vb')], 1L], [[('b', 'daOEIaoe'), ('a', 'caaoOUIe')], 2L]]
'''
if ticket_id:
tstr = " and opt1='%s' " % ticket_id
else:
tstr = " "
tickets = run_sql("select tag,data,opt1 from aidPERSONIDDATA where personid=%s and "
" tag like 'rt_%%' " + tstr , (person_id,))
return [[[(s[0][3:], s[1]) for s in d], k] for k, d in groupby(sorted(tickets, key=lambda k: k[2]), key=lambda k: k[2])]
def insert_user_log(userinfo, personid, action, tag, value, comment='', transactionid=0, timestamp=None):
'''
Insert log entries into the user log table.
For examples of entries, look at the table generation script.
@param userinfo: username or user identifier
@type: string
@param personid: personid involved in the transaction
@type: longint
@param action: action type
@type: string
@param tag: tag
@type: string
@param value: value for the transaction
@type: string
@param comment: optional comment for the transaction
@type: string
@param transactionid: optional id for the transaction
@type: longint
@return: the transactionid
@rtype: longint
'''
# if transactionid == 0:
# transactionid = max(run_sql('SELECT MAX(transactionid) FROM `aidUSERINPUTLOG`')[0][0], -1) + 1
if not timestamp:
timestamp = run_sql('select now()')[0][0]
# run_sql('insert into aidUSERINPUTLOG (transactionid,timestamp,userinfo,personid,action,tag,value,comment) values '
# '(%(transactionid)s,%(timestamp)s,%(userinfo)s,%(personid)s,%(action)s,%(tag)s,%(value)s,%(comment)s)',
# ({'transactionid':str(transactionid),
# 'timestamp':timestamp.timestamp,
# 'userinfo':str(userinfo),
# 'personid':str(personid),
# 'action':str(action),
# 'tag':str(tag),
# 'value':str(value),
# 'comment':str(comment)}))
run_sql('insert into aidUSERINPUTLOG '
'(transactionid,timestamp,userinfo,personid,action,tag,value,comment) values '
'(%s,%s,%s,%s,%s,%s,%s,%s)',
(transactionid, timestamp, userinfo, personid,
action, tag, value, comment))
return transactionid
def person_bibref_is_touched_old(pid, bibref):
'''
Determines if a record attached to a person has been touched by a human
by checking the flag.
@param pid: The Person ID of the person to check the assignment from
@type pid: int
@param bibref: The paper identifier to be checked (e.g. "100:12,144")
@type bibref: string
'''
bibref, rec = bibref.split(",")
table, ref = bibref.split(":")
flag = run_sql("SELECT flag "
"FROM aidPERSONIDPAPERS "
"WHERE personid = %s "
"AND bibref_table = %s "
"AND bibref_value = %s "
"AND bibrec = %s"
, (pid, table, ref, rec))
try:
flag = flag[0][0]
except (IndexError):
return False
if not flag:
return False
elif -2 < flag < 2:
return False
else:
return True
def confirm_papers_to_person(pid, papers, user_level=0):
'''
Confirms the relationship between pid and paper, as from user input.
@param pid: id of the person
@type pid: ('2',)
@param papers: list of papers to confirm
@type papers: (('100:7531,9024',),)
@param user_level: access level to store in the `lcul` column for the claim
@type user_level: int
'''
for p in papers:
bibref, rec = p[0].split(",")
rec = int(rec)
table, ref = bibref.split(":")
ref = int(ref)
run_sql("delete from aidPERSONIDPAPERS where personid=%s and bibrec=%s", (pid[0], rec))
run_sql("delete from aidPERSONIDPAPERS where bibref_table=%s and "
" bibref_value = %s and bibrec=%s",
(table, ref, rec))
add_signature([table, ref, rec], None, pid[0])
run_sql("update aidPERSONIDPAPERS "
"set personid = %s "
", flag = %s "
", lcul = %s "
"where bibref_table = %s "
"and bibref_value = %s "
"and bibrec = %s"
, (str(pid[0]), '2', user_level,
table, ref, rec))
update_personID_canonical_names(pid)
def reject_papers_from_person(pid, papers, user_level=0):
'''
Confirms the negative relationship between pid and paper, as from user input.
@param pid: id of the person
@type pid: integer
@param papers: list of papers to confirm
@type papers: ('100:7531,9024',)
'''
new_pid = get_new_personid()
for p in papers:
brr, rec = p.split(",")
table, ref = brr.split(':')
sig = (table, ref, rec)
records = personid_name_from_signature(sig)
assert(records)
fpid, name = records[0]
assert fpid == pid
run_sql("INSERT INTO aidPERSONIDPAPERS "
"(personid, bibref_table, bibref_value, bibrec, name, flag, lcul) "
"VALUES (%s, %s, %s, %s, %s, %s, %s)"
, (pid, table, ref, rec, name, -2, user_level))
move_signature(sig, new_pid)
update_personID_canonical_names((pid,))
def reset_papers_flag(pid, papers):
'''
Resets the flag associated with the papers to '0'
@param papers: list of papers to reset
@type papers: (('100:7531,9024',),)
'''
for p in papers:
bibref, rec = p[0].split(",")
table, ref = bibref.split(":")
run_sql("update aidPERSONIDPAPERS "
"set flag = %s, lcul = %s "
"where bibref_table = %s "
"and bibref_value = %s "
"and bibrec = %s" ,
('0', '0',
table, ref, rec))
def user_can_modify_data(uid, pid):
'''
Return True if the uid can modify data of this personID, False otherwise.
@param uid: the user id
@type: int
@param pid: the person id
@type: int
@return: can the user modify data?
@rtype: boolean
'''
pid_uid = run_sql("select data from aidPERSONIDDATA where tag = %s"
" and personid = %s", ('uid', str(pid)))
if len(pid_uid) >= 1 and str(uid) == str(pid_uid[0][0]):
rights = bconfig.CLAIMPAPER_CHANGE_OWN_DATA
else:
rights = bconfig.CLAIMPAPER_CHANGE_OTHERS_DATA
return acc_authorize_action(uid, rights)[0] == 0
def get_possible_bibrecref(names, bibrec, always_match=False):
'''
Returns a list of bibrefs for which the surname is matching
@param names: list of names strings
@param bibrec: bibrec number
@param always_match: match with all the names (full bibrefs list)
'''
splitted_names = [split_name_parts(n) for n in names]
bibrec_names_100 = run_sql("select o.id, o.value from bib10x o, "
"(select i.id_bibxxx as iid from bibrec_bib10x i "
"where id_bibrec=%s) as dummy "
"where o.tag='100__a' AND o.id = dummy.iid",
(str(bibrec),))
bibrec_names_700 = run_sql("select o.id, o.value from bib70x o, "
"(select i.id_bibxxx as iid from bibrec_bib70x i "
"where id_bibrec=%s) as dummy "
"where o.tag='700__a' AND o.id = dummy.iid",
(str(bibrec),))
# bibrec_names_100 = run_sql("select id,value from bib10x where tag='100__a' and id in "
# "(select id_bibxxx from bibrec_bib10x where id_bibrec=%s)",
# (str(bibrec),))
# bibrec_names_700 = run_sql("select id,value from bib70x where tag='700__a' and id in "
# "(select id_bibxxx from bibrec_bib70x where id_bibrec=%s)",
# (str(bibrec),))
bibreflist = []
for b in bibrec_names_100:
spb = split_name_parts(b[1])
for n in splitted_names:
if (n[0].lower() == spb[0].lower()) or always_match:
if ['100:' + str(b[0]), b[1]] not in bibreflist:
bibreflist.append(['100:' + str(b[0]), b[1]])
for b in bibrec_names_700:
spb = split_name_parts(b[1])
for n in splitted_names:
if (n[0].lower() == spb[0].lower()) or always_match:
if ['700:' + str(b[0]), b[1]] not in bibreflist:
bibreflist.append(['700:' + str(b[0]), b[1]])
return bibreflist
def user_can_modify_paper(uid, paper):
'''
    Return True if the uid can modify this paper, False otherwise.
    If the paper is assigned more than once (by the algorithms), consider the most privileged
    assignment.
    @param uid: the user id
    @type uid: int
    @param paper: the paper bibref,bibrec pair x00:1234,4321
    @type paper: str
    @return: can the user modify the paper attribution?
    @rtype: boolean
'''
bibref, rec = paper.split(",")
table, ref = bibref.split(":")
prow = run_sql("select personid, lcul from aidPERSONIDPAPERS "
"where bibref_table = %s and bibref_value = %s and bibrec = %s "
"order by lcul desc limit 0,1",
(table, ref, rec))
if len(prow) == 0:
return ((acc_authorize_action(uid, bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS)[0] == 0) or
(acc_authorize_action(uid, bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS)[0] == 0))
min_req_acc_n = int(prow[0][1])
req_acc = resolve_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS)
pid_uid = run_sql("select data from aidPERSONIDDATA where tag = %s and personid = %s", ('uid', str(prow[0][0])))
if len(pid_uid) > 0:
if (str(pid_uid[0][0]) != str(uid)) and min_req_acc_n > 0:
req_acc = resolve_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS)
if min_req_acc_n < req_acc:
min_req_acc_n = req_acc
min_req_acc = resolve_paper_access_right(min_req_acc_n)
return (acc_authorize_action(uid, min_req_acc)[0] == 0) and (resolve_paper_access_right(min_req_acc) >= min_req_acc_n)
def resolve_paper_access_right(acc):
'''
    Given a string, resolves it to the corresponding integer access level, and vice versa.
    If asked for a wrong/missing parameter, falls back to the minimum privilege.
'''
access_dict = {bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE: 0,
bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS: 25,
bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS: 50}
if isinstance(acc, str):
try:
return access_dict[acc]
        except KeyError:
            return 0
inverse_dict = dict([[v, k] for k, v in access_dict.items()])
lower_accs = [a for a in inverse_dict.keys() if a <= acc]
try:
return inverse_dict[max(lower_accs)]
    except ValueError:
        return bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE
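# Behaviour sketch (values from access_dict above):
#     resolve_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS) -> 25
#     resolve_paper_access_right(30) -> the action name bound to 25, the
#         highest level not above 30
#     resolve_paper_access_right(-5) -> bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE,
#         since no level is <= -5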
def get_recently_modified_record_ids(date):
'''
    Returns the bibrecs with modification date more recent than date.
@param date: date
'''
return [p[0] for p in run_sql(
"select id from bibrec where modification_date > %s", (date,))]
def filter_modified_record_ids(bibrecs, date):
'''
    Returns only the bibrecs whose modification date is older than the given date.
    @param date: date
'''
return ifilter(
lambda x: run_sql("select count(*) from bibrec "
"where id = %s and "
"modification_date < %s"
, (x[2], date))[0][0]
, bibrecs)
def get_cached_author_page(pageparam):
'''
    Return the cached author page.
    @param pageparam: int personid
    @return: (id, 'authorpage_cache', personid, authorpage_html, date_cached)
'''
#TABLE: id, tag, identifier, data, date
caches = run_sql("select id, object_name, object_key, object_value, last_updated \
from aidCACHE \
where object_name='authorpage_cache' and object_key=%s",
(str(pageparam),))
if len(caches) >= 1:
return caches[0]
else:
return []
def delete_cached_author_page(personid):
'''
Deletes from the author page cache the page concerning one person
'''
run_sql("delete from aidCACHE where object_name='authorpage_cache' and object_key=%s", (str(personid),))
def update_cached_author_page_timestamp(pageparam):
'''
Updates cached author page timestamp
@param pageparam: int personid
'''
#TABLE: id, tag, identifier, data, date
run_sql("update aidCACHE set last_updated=now() where object_name='authorpage_cache' and object_key=%s", (str(pageparam),))
def update_cached_author_page(pageparam, page):
'''
Updates cached author page, deleting old caches for same pageparam
@param pageparam: int personid
@param page: string html authorpage
'''
#TABLE: id, tag, identifier, data, date
run_sql("delete from aidCACHE where object_name='authorpage_cache' and object_key=%s", (str(pageparam),))
run_sql("insert into aidCACHE values (Null,'authorpage_cache',%s,%s,now())", (str(pageparam), str(page)))
def get_user_log(transactionid='', userinfo='', personID='', action='', tag='', value='', comment='', only_most_recent=False):
'''
    Get the user log table entries matching all the given parameters; all of them are optional.
    If no parameters are given, returns the complete log table.
    @param transactionid: id of the transaction
    @param userinfo: user name or identifier
    @param personID: id of the person involved
@param action: action
@param tag: tag
@param value: value
@param comment: comment
'''
sql_query = ('select id,transactionid,timestamp,userinfo,personid,action,tag,value,comment ' +
'from aidUSERINPUTLOG where 1 ')
if transactionid:
sql_query += ' and transactionid=\'' + str(transactionid) + '\''
if userinfo:
sql_query += ' and userinfo=\'' + str(userinfo) + '\''
if personID:
sql_query += ' and personid=\'' + str(personID) + '\''
if action:
sql_query += ' and action=\'' + str(action) + '\''
if tag:
sql_query += ' and tag=\'' + str(tag) + '\''
if value:
sql_query += ' and value=\'' + str(value) + '\''
if comment:
sql_query += ' and comment=\'' + str(comment) + '\''
if only_most_recent:
sql_query += ' order by timestamp desc limit 0,1'
return run_sql(sql_query)
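# A parameterized sketch (hypothetical rewrite, not part of the original
# module): the string concatenation above breaks, and is injectable, if any
# argument contains a quote. Binding values through run_sql placeholders,
# as the rest of this module does, avoids that:
#
#     filters = (('transactionid', transactionid), ('userinfo', userinfo),
#                ('personid', personID), ('action', action), ('tag', tag),
#                ('value', value), ('comment', comment))
#     active = [(col, val) for col, val in filters if val]
#     sql = sql_query + "".join(" and %s=%%s" % col for col, _ in active)
#     return run_sql(sql, tuple(val for _, val in active))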
def list_2_SQL_str(items, f=lambda x: x):
"""
    Concatenates all items in items into an SQL tuple string using f.
    @param items: a set of items
    @type items: X
    @param f: a function which transforms each item from items into a string
    @type f: X:->str
    @return: "(x1, x2, x3, ... xn)" for xi in items
    @rtype: string
"""
strs = (str(f(x)) for x in items)
return "(%s)" % ", ".join(strs)
def get_authors_from_paper(paper):
'''
    Selects all author bibrefs for a given paper.
'''
fullbibrefs100 = run_sql("select id_bibxxx from bibrec_bib10x where id_bibrec=%s", (paper,))
if len(fullbibrefs100) > 0:
fullbibrefs100str = list_2_SQL_str(fullbibrefs100, lambda x: str(x[0]))
return run_sql("select id from bib10x where tag='100__a' and id in %s" % (fullbibrefs100str,))
return tuple()
def get_coauthors_from_paper(paper):
'''
    Selects all coauthor bibrefs for a given paper.
'''
fullbibrefs700 = run_sql("select id_bibxxx from bibrec_bib70x where id_bibrec=%s", (paper,))
if len(fullbibrefs700) > 0:
fullbibrefs700str = list_2_SQL_str(fullbibrefs700, lambda x: str(x[0]))
return run_sql("select id from bib70x where tag='700__a' and id in %s" % (fullbibrefs700str,))
return tuple()
def get_bibrefrec_subset(table, papers, refs):
table = "bibrec_bib%sx" % str(table)[:-1]
contents = run_sql("select id_bibrec, id_bibxxx from %s" % table)
papers = set(papers)
refs = set(refs)
    # the join produces duplicate rows, so collapse them into a set
return set(ifilter(lambda x: x[0] in papers and x[1] in refs, contents))
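# Illustrative call (assumed data): with papers = [1, 2], refs = [10] and
# mapping rows ((1, 10), (1, 10), (2, 11)), the filter keeps the two
# (1, 10) rows and the set collapses them to set([(1, 10)]).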
def get_deleted_papers():
return run_sql("select o.id_bibrec from bibrec_bib98x o, "
"(select i.id as iid from bib98x i "
"where value = 'DELETED' "
"and tag like '980__a') as dummy "
"where o.id_bibxxx = dummy.iid")
#bibauthorid_maintenance personid update private methods
def update_personID_canonical_names(persons_list=None, overwrite=False, suggested=''):
'''
    Updates the personID table, creating or updating canonical names for persons
    @param persons_list: persons to consider for the update (('1'),)
    @param overwrite: whether to overwrite already existing canonical names
    @param suggested: string suggesting a canonical name for the person
'''
if not persons_list:
persons_list = [x[0] for x in run_sql('select distinct personid from aidPERSONIDPAPERS')]
for idx, pid in enumerate(persons_list):
update_status(float(idx) / float(len(persons_list)), "Updating canonical_names...")
current_canonical = run_sql("select data from aidPERSONIDDATA where "
"personid=%s and tag=%s", (pid, 'canonical_name'))
if overwrite or len(current_canonical) == 0:
names = get_person_names_count(pid)
names = sorted(names, key=lambda k: k[1], reverse=True)
if len(names) < 1 and not suggested:
continue
else:
if suggested:
canonical_name = suggested
else:
canonical_name = create_canonical_name(names[0][0])
run_sql("delete from aidPERSONIDDATA where personid=%s and tag=%s",
(pid, 'canonical_name'))
existing_cnames = run_sql("select data from aidPERSONIDDATA "
"where tag=%s and data like %s",
('canonical_name', str(canonical_name) + '%'))
existing_cnames = set(name[0] for name in existing_cnames)
for i in count(1):
cur_try = canonical_name + '.' + str(i)
if cur_try not in existing_cnames:
canonical_name = cur_try
break
run_sql("insert into aidPERSONIDDATA (personid, tag, data) values (%s,%s,%s) ",
(pid, 'canonical_name', canonical_name))
update_status_final("Updating canonical_names finished.")
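# Suffixing sketch (hypothetical person): if create_canonical_name() yields
# "J.Smith", the loop above probes "J.Smith.1", "J.Smith.2", ... against the
# existing canonical names and stores the first free one, so every canonical
# name carries a numeric suffix starting from .1.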
def personid_get_recids_affected_since(last_timestamp):
'''
Returns a list of recids which have been manually changed since timestamp
@TODO: extend the system to track and signal even automatic updates (unless a full reindex is
acceptable in case of magic automatic update)
    @param last_timestamp: last update, datetime.datetime
'''
vset = set(int(v[0].split(',')[1]) for v in run_sql(
"select distinct value from aidUSERINPUTLOG "
"where timestamp > %s", (last_timestamp,))
if ',' in v[0] and ':' in v[0])
pids = set(int(p[0]) for p in run_sql(
"select distinct personid from aidUSERINPUTLOG "
"where timestamp > %s", (last_timestamp,))
if p[0] > 0)
if pids:
pids_s = list_2_SQL_str(pids)
vset |= set(int(b[0]) for b in run_sql(
"select bibrec from aidPERSONIDPAPERS "
"where personid in %s" % pids_s))
return list(vset) # I'm not sure about this cast. It might work without it.
def get_all_paper_records(pid, claimed_only=False):
if not claimed_only:
return run_sql("SELECT distinct bibrec FROM aidPERSONIDPAPERS WHERE personid = %s", (str(pid),))
else:
return run_sql("SELECT distinct bibrec FROM aidPERSONIDPAPERS WHERE "
"personid = %s and flag=2 or flag=-2", (str(pid),))
def get_all_names_from_personid():
return ((name[0][0], set(n[1] for n in name), len(name))
for name in (run_sql(
"SELECT personid, name "
"FROM aidPERSONIDPAPERS "
"WHERE personid = %s "
"AND flag > -2", p)
for p in run_sql(
"SELECT DISTINCT personid "
"FROM aidPERSONIDPAPERS "
"WHERE flag > -2")
))
def get_grouped_records(bibrefrec, *args):
'''
    For a given bibrefrec (mark:ref,rec) this function scans the
    bibMARKx table and extracts all values with a tag in args which
    are grouped together with this bibrec.
    Returns a dictionary { tag : [extracted_values] }; a tag with no
    values maps to an empty list.
    @type bibrefrec: (mark(int), ref(int), rec(int))
'''
table, ref, rec = bibrefrec
target_table = "bib%sx" % (str(table)[:-1])
mapping_table = "bibrec_%s" % target_table
group_id = run_sql("SELECT field_number "
"FROM %s "
"WHERE id_bibrec = %d "
"AND id_bibxxx = %d" %
(mapping_table, rec, ref))
if len(group_id) == 0:
# unfortunately the mapping is not found, so
# we cannot find anything
return dict((arg, []) for arg in args)
elif len(group_id) == 1:
# All is fine
field_number = group_id[0][0]
else:
# sounds bad, but ignore the error
field_number = group_id[0][0]
grouped = run_sql("SELECT id_bibxxx "
"FROM %s "
"WHERE id_bibrec = %d "
"AND field_number = %d" %
(mapping_table, rec, int(field_number)))
assert len(grouped) > 0
grouped_s = list_2_SQL_str(grouped, lambda x: str(x[0]))
ret = {}
for arg in args:
qry = run_sql("SELECT value "
"FROM %s "
"WHERE tag LIKE '%s' "
"AND id IN %s" %
(target_table, arg, grouped_s))
ret[arg] = [q[0] for q in qry]
return ret
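# Usage sketch (hypothetical bibrefrec and tags): a call such as
#     get_grouped_records(('100', 312, 4507), '100__a', '100__u')
# would return something like {'100__a': ['Ellis, J.'], '100__u': ['CERN']},
# one list per requested tag, empty when the grouped field instance carries
# no such subfield.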
def get_name_by_bibrecref(bib):
'''
@param bib: bibrefrec or bibref
@type bib: (mark, bibref, bibrec) OR (mark, bibref)
'''
table = "bib%sx" % (str(bib[0])[:-1])
refid = bib[1]
tag = "%s__a" % bib[0]
ret = run_sql("select value from %s where id = '%s' and tag = '%s'" % (table, refid, tag))
# if zero - check if the garbage collector has run
assert len(ret) == 1
return ret[0][0]
def get_collaboration(bibrec):
bibxxx = run_sql("select id_bibxxx from bibrec_bib71x where id_bibrec = %s", (str(bibrec),))
if len(bibxxx) == 0:
return ()
bibxxx = list_2_SQL_str(bibxxx, lambda x: str(x[0]))
ret = run_sql("select value from bib71x where id in %s and tag like '%s'" % (bibxxx, "710__g"))
return [r[0] for r in ret]
def get_key_words(bibrec):
if bconfig.CFG_ADS_SITE:
bibxxx = run_sql("select id_bibxxx from bibrec_bib65x where id_bibrec = %s", (str(bibrec),))
else:
bibxxx = run_sql("select id_bibxxx from bibrec_bib69x where id_bibrec = %s", (str(bibrec),))
if len(bibxxx) == 0:
return ()
bibxxx = list_2_SQL_str(bibxxx, lambda x: str(x[0]))
if bconfig.CFG_ADS_SITE:
ret = run_sql("select value from bib69x where id in %s and tag like '%s'" % (bibxxx, "6531_a"))
else:
ret = run_sql("select value from bib69x where id in %s and tag like '%s'" % (bibxxx, "695__a"))
return [r[0] for r in ret]
def get_all_authors(bibrec):
bibxxx_1 = run_sql("select id_bibxxx from bibrec_bib10x where id_bibrec = %s", (str(bibrec),))
bibxxx_7 = run_sql("select id_bibxxx from bibrec_bib70x where id_bibrec = %s", (str(bibrec),))
if bibxxx_1:
bibxxxs_1 = list_2_SQL_str(bibxxx_1, lambda x: str(x[0]))
authors_1 = run_sql("select value from bib10x where tag = '%s' and id in %s" % ('100__a', bibxxxs_1,))
else:
authors_1 = []
if bibxxx_7:
bibxxxs_7 = list_2_SQL_str(bibxxx_7, lambda x: str(x[0]))
authors_7 = run_sql("select value from bib70x where tag = '%s' and id in %s" % ('700__a', bibxxxs_7,))
else:
authors_7 = []
return [a[0] for a in authors_1] + [a[0] for a in authors_7]
def get_title_from_rec(rec):
"""
Returns the name of the paper like str if found.
Otherwise returns None.
"""
title_id = run_sql("SELECT id_bibxxx "
"FROM bibrec_bib24x "
"WHERE id_bibrec = %s",
(rec,))
if title_id:
title_id_s = list_2_SQL_str(title_id, lambda x: x[0])
title = run_sql("SELECT value "
"FROM bib24x "
"WHERE id in %s "
"AND tag = '245__a'"
% title_id_s)
if title:
return title[0][0]
def get_bib10x():
return run_sql("select id, value from bib10x where tag like %s", ("100__a",))
def get_bib70x():
return run_sql("select id, value from bib70x where tag like %s", ("700__a",))
class bib_matrix:
'''
    This small class encapsulates the sparse matrix
    of pairwise comparison values.
'''
# please increment this value every time you
# change the output of the comparison functions
current_comparison_version = 9
special_items = ((None, -3., 'N'), ('+', -2., '+'), ('-', -1., '-'))
special_symbols = dict((x[0], (x[1], x[2])) for x in special_items)
special_numbers = dict((x[1], (x[0], x[2])) for x in special_items)
special_strings = dict((x[2], (x[0], x[1])) for x in special_items)
def __init__(self, cluster_set=None):
if cluster_set:
bibs = chain(*(cl.bibs for cl in cluster_set.clusters))
self._bibmap = dict((b[1], b[0]) for b in enumerate(bibs))
width = len(self._bibmap)
size = ((width - 1) * width) / 2
self._matrix = bib_matrix.create_empty_matrix(size)
else:
self._bibmap = dict()
@staticmethod
    def create_empty_matrix(length):
        ret = numpy.ndarray(shape=(length, 2), dtype=float, order='C')
ret.fill(bib_matrix.special_symbols[None][0])
return ret
def _resolve_entry(self, bibs):
entry = sorted(self._bibmap[bib] for bib in bibs)
assert entry[0] < entry[1]
return entry[0] + ((entry[1] - 1) * entry[1]) / 2
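    # Index layout (follows from the formula above): for positions i < j the
    # pair is stored at i + j*(j-1)/2, i.e.
    #     (0,1)->0  (0,2)->1  (1,2)->2  (0,3)->3  (1,3)->4  (2,3)->5
    # which packs the strict upper triangle of the pairwise matrix into a
    # flat array of length n*(n-1)/2.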
def __setitem__(self, bibs, val):
entry = self._resolve_entry(bibs)
if val in self.special_symbols:
num = self.special_symbols[val][0]
val = (num, num)
self._matrix[entry] = val
def __getitem__(self, bibs):
entry = self._resolve_entry(bibs)
ret = self._matrix[entry]
if ret[0] in self.special_numbers:
return self.special_numbers[ret[0]][0]
return ret[0], ret[1]
def __contains__(self, bib):
return bib in self._bibmap
def get_keys(self):
return self._bibmap.keys()
@staticmethod
def __pickle_tuple(tupy):
'''
        tupy is an iterable of matrix entries. Each element may be:
        * a (float, float) pair
        * None
        * '+' or '-'
'''
def to_str(elem):
if elem[0] in bib_matrix.special_numbers:
return "%s" % bib_matrix.special_numbers[elem[0]][1]
return "%.2f:%.2f" % (elem[0], elem[1])
return "|".join(imap(to_str, tupy))
@staticmethod
def __unpickle_tuple(tupy):
'''
        tupy must be an object created by __pickle_tuple.
'''
def from_str(elem):
if elem in bib_matrix.special_strings:
nummy = bib_matrix.special_strings[elem][1]
return (nummy, nummy)
fls = elem.split(":")
assert len(fls) == 2
return (float(fls[0]), float(fls[1]))
strs = tupy.split("|")
if strs == ['']:
strs = []
ret = bib_matrix.create_empty_matrix(len(strs))
for i, stri in enumerate(strs):
if i % 100000 == 0:
update_status(float(i) / len(strs), "Loading the cache...")
ret[i][0], ret[i][1] = from_str(stri)
update_status_final("Probability matrix loaded.")
return ret
def load(self, name):
'''
This method will load the matrix from the
database.
'''
row = run_sql("select bibmap, matrix "
"from aidPROBCACHE "
"where cluster like %s",
(name,))
if len(row) == 0:
self._bibmap = dict()
return False
elif len(row) == 1:
bibmap_vs = zlib.decompress(row[0][0])
bibmap_v = cPickle.loads(bibmap_vs)
rec_v, self.creation_time, self._bibmap = bibmap_v
if (rec_v != bib_matrix.current_comparison_version or
bib_matrix.current_comparison_version < 0): # you can use negative
# version to recalculate
self._bibmap = dict()
return False
matrix_s = zlib.decompress(row[0][1])
self._matrix = bib_matrix.__unpickle_tuple(matrix_s)
            if self._bibmap and self._matrix is not None:
if len(self._bibmap) * (len(self._bibmap) - 1) / 2 != len(self._matrix):
print >> sys.stderr, ("Error: aidPROBCACHE is corrupted! "
"Cluster %s has bibmap with %d bibs, "
"but matrix with %d entries."
% (name, len(self._bibmap), len(self._matrix)))
print >> sys.stderr, "Try to increase max_packet_size."
assert False, "Bibmap: %d, Matrix %d" % (len(self._bibmap), len(self._matrix))
return False
return True
else:
self._bibmap = dict()
return False
else:
assert False, "aidPROBCACHE is corrupted"
self._bibmap = dict()
return False
def store(self, name, creation_time):
bibmap_v = (bib_matrix.current_comparison_version, creation_time, self._bibmap)
bibmap_vs = cPickle.dumps(bibmap_v)
bibmap_vsc = zlib.compress(bibmap_vs)
matrix_s = bib_matrix.__pickle_tuple(self._matrix)
matrix_sc = zlib.compress(matrix_s)
run_sql("delete from aidPROBCACHE where cluster like %s", (name,))
run_sql("insert low_priority "
"into aidPROBCACHE "
"set cluster = %s, "
"bibmap = %s, "
"matrix = %s",
(name, bibmap_vsc, matrix_sc))
def delete_paper_from_personid(rec):
'''
Deletes all information in PERSONID about a given paper
'''
run_sql("delete from aidPERSONIDPAPERS where bibrec = %s", (rec,))
def get_signatures_from_rec(bibrec):
'''
Retrieves all information in PERSONID
about a given bibrec.
'''
return run_sql("select personid, bibref_table, bibref_value, bibrec, name "
"from aidPERSONIDPAPERS where bibrec = %s"
, (bibrec,))
def modify_signature(oldref, oldrec, newref, newname):
'''
    Modifies a signature in aidPERSONIDPAPERS.
'''
return run_sql("UPDATE aidPERSONIDPAPERS "
"SET bibref_table = %s, bibref_value = %s, name = %s "
"WHERE bibref_table = %s AND bibref_value = %s AND bibrec = %s"
, (str(newref[0]), newref[1], newname,
str(oldref[0]), oldref[1], oldrec))
def find_pids_by_name(name):
'''
Finds names and personids by a prefix name.
'''
return set(run_sql("SELECT personid, name "
"FROM aidPERSONIDPAPERS "
"WHERE name like %s"
, (name + ',%',)))
def find_pids_by_exact_name(name):
"""
Finds names and personids by a name.
"""
return set(run_sql("SELECT personid "
"FROM aidPERSONIDPAPERS "
"WHERE name = %s"
, (name,)))
def remove_sigs(signatures):
'''
Removes records from aidPERSONIDPAPERS
'''
for sig in signatures:
run_sql("DELETE FROM aidPERSONIDPAPERS "
"WHERE bibref_table like %s AND bibref_value = %s AND bibrec = %s"
, (str(sig[0]), sig[1], sig[2]))
def remove_personid_papers(pids):
'''
Removes all signatures from aidPERSONIDPAPERS with pid in pids
'''
if pids:
run_sql("delete from aidPERSONIDPAPERS where personid in %s"
% list_2_SQL_str(pids))
def get_full_personid_papers(table_name="`aidPERSONIDPAPERS`"):
'''
Get all columns and rows from aidPERSONIDPAPERS
or any other table with the same structure.
'''
return run_sql("select personid, bibref_table, "
"bibref_value, bibrec, name, flag, "
"lcul from %s" % table_name)
def get_full_results():
'''
    Deprecated. Should be removed soon.
'''
return run_sql("select personid, bibref_table, bibref_value, bibrec "
"from aidRESULTS")
def get_lastname_results(last_name):
'''
Returns rows from aidRESULTS which share a common last name.
'''
return run_sql("select personid, bibref_table, bibref_value, bibrec "
"from aidRESULTS "
"where personid like '" + last_name + ".%'")
def get_full_personid_data(table_name="`aidPERSONIDDATA`"):
'''
Get all columns and rows from aidPERSONIDDATA
or any other table with the same structure.
'''
return run_sql("select personid, tag, data, "
"opt1, opt2, opt3 from %s" % table_name)
def get_wrong_names():
'''
    Returns a generator of all wrong names in aidPERSONIDPAPERS, together
    with their total count.
    Every element is (table, ref, correct_name).
'''
bib100 = dict(((x[0], create_normalized_name(split_name_parts(x[1]))) for x in get_bib10x()))
bib700 = dict(((x[0], create_normalized_name(split_name_parts(x[1]))) for x in get_bib70x()))
pidnames100 = run_sql("select distinct bibref_value, name from aidPERSONIDPAPERS "
" where bibref_table='100'")
pidnames700 = run_sql("select distinct bibref_value, name from aidPERSONIDPAPERS "
" where bibref_table='700'")
wrong100 = set(('100', x[0], bib100.get(x[0], None)) for x in pidnames100 if x[1] != bib100.get(x[0], None))
wrong700 = set(('700', x[0], bib700.get(x[0], None)) for x in pidnames700 if x[1] != bib700.get(x[0], None))
total = len(wrong100) + len(wrong700)
return chain(wrong100, wrong700), total
def check_personid_papers(output_file=None):
'''
    Checks all invariants of personid.
    Writes to stdout if output_file is not given.
'''
if output_file:
fp = open(output_file, "w")
printer = lambda x: fp.write(x + '\n')
else:
printer = bibauthor_print
checkers = (check_duplicated_papers,
check_duplicated_signatures,
check_wrong_names,
check_canonical_names,
check_empty_personids,
check_wrong_rejection,
                # check_claim_inspireid_contradiction,
)
    # Build a list instead of chaining the calls with `and`, so that every
    # checker runs; short-circuit evaluation would skip the rest after a failure.
return all([check(printer) for check in checkers])
def check_duplicated_papers(printer):
ret = True
pids = run_sql("select distinct personid from aidPERSONIDPAPERS")
for pid in pids:
pid = pid[0]
recs = run_sql("select bibrec from aidPERSONIDPAPERS where personid = %s and flag <> %s", (pid, -2))
recs = [rec[0] for rec in recs]
for rec in set(recs):
recs.remove(rec)
if recs:
ret = False
printer("Person %d has duplicated papers: %s" % (pid, str(tuple(set(recs)))))
return ret
def check_duplicated_signatures(printer):
ret = True
recs = run_sql("select distinct bibrec from aidPERSONIDPAPERS")
for rec in recs:
rec = rec[0]
refs = list(run_sql("select bibref_table, bibref_value from aidPERSONIDPAPERS where bibrec = %s and flag > %s", (rec, "-2")))
for ref in set(refs):
refs.remove(ref)
if refs:
ret = False
refs = sorted(refs)
refs = groupby(refs)
refs = ["Found %s:%s %d times." % (key[0], key[1], len(list(data)) + 1) for key, data in refs]
printer("Paper %d has duplicated signatures:" % rec)
for ref in refs:
printer("\t%s" % ref)
return ret
def check_wrong_names(printer):
ret = True
wrong_names, number = get_wrong_names()
if number > 0:
ret = False
printer("%d corrupted names in aidPERSONIDPAPERS." % number)
for wrong_name in wrong_names:
if wrong_name[2]:
printer("Outdated name, '%s'(%s:%d)." % (wrong_name[2], wrong_name[0], wrong_name[1]))
else:
printer("Invalid id(%s:%d)." % (wrong_name[0], wrong_name[1]))
return ret
def check_canonical_names(printer):
ret = True
pid_cn = run_sql("select personid, data from aidPERSONIDDATA where tag = %s", ('canonical_name',))
pid_2_cn = dict((k, len(list(d))) for k, d in groupby(sorted(pid_cn, key=itemgetter(0)), key=itemgetter(0)))
for pid in get_existing_personids():
canon = pid_2_cn.get(pid, 0)
if canon != 1:
if canon == 0:
papers = run_sql("select count(*) from aidPERSONIDPAPERS where personid = %s", (pid,))[0][0]
if papers != 0:
printer("Personid %d does not have a canonical name, but have %d papers." % (pid, papers))
ret = False
else:
printer("Personid %d has %d canonical names.", (pid, canon))
ret = False
return ret
def check_empty_personids(printer):
ret = True
paper_pids = set(p[0] for p in run_sql("select personid from aidPERSONIDPAPERS"))
data_pids = set(p[0] for p in run_sql("select personid from aidPERSONIDDATA"))
for p in data_pids - paper_pids:
fields = run_sql("select count(*) from aidPERSONIDDATA where personid = %s and tag <> %s", (p, "canonical_name",))[0][0]
if fields == 0:
printer("Personid %d has no papers and nothing else than canonical_name." % p)
ret = False
return ret
def check_wrong_rejection(printer):
ret = True
all_rejections = run_sql("select personid, bibref_table, bibref_value, bibrec "
"from aidPERSONIDPAPERS "
"where flag = %s",
('-2',))
for rej in all_rejections:
sigs = run_sql("select personid from aidPERSONIDPAPERS "
"where bibref_table = %s "
"and bibref_value = %s "
"and bibrec = %s "
"and flag <> '-2'", rej[1:])
        # To avoid duplicating error messages, don't complain
        # if the paper is assigned to more than one personid.
if not sigs:
printer("The paper (%s:%s,%s) was rejected from person %d, but never assigned or claimed." % (rej[1:] + rej[:1]))
ret = False
        elif (rej[0],) in sigs:
printer("Personid %d has both assigned and rejected paper (%s:%s,%s)." % rej)
ret = False
return ret
def check_merger():
'''
This function presumes that copy_personid was
called before the merger.
'''
is_ok = True
old_claims = set(run_sql("select personid, bibref_table, bibref_value, bibrec, flag "
"from aidPERSONIDPAPERS_copy "
"where flag = -2 or flag = 2"))
cur_claims = set(run_sql("select personid, bibref_table, bibref_value, bibrec, flag "
"from aidPERSONIDPAPERS "
"where flag = -2 or flag = 2"))
errors = ((old_claims - cur_claims, "Some claims were lost during the merge."),
(cur_claims - old_claims, "Some new claims appeared after the merge."))
act = { -2 : 'Rejection', 2 : 'Claim' }
for err_set, err_msg in errors:
if err_set:
is_ok = False
bibauthor_print(err_msg)
bibauthor_print("".join(" %s: personid %d %d:%d,%d\n" %
                (act[cl[4]], cl[0], int(cl[1]), cl[2], cl[3]) for cl in err_set))
old_assigned = set(run_sql("select bibref_table, bibref_value, bibrec "
"from aidPERSONIDPAPERS_copy"))
#"where flag <> -2 and flag <> 2"))
cur_assigned = set(run_sql("select bibref_table, bibref_value, bibrec "
"from aidPERSONIDPAPERS"))
#"where flag <> -2 and flag <> 2"))
errors = ((old_assigned - cur_assigned, "Some signatures were lost during the merge."),
(cur_assigned - old_assigned, "Some new signatures appeared after the merge."))
for err_sig, err_msg in errors:
if err_sig:
is_ok = False
bibauthor_print(err_msg)
bibauthor_print("".join(" %s:%d,%d\n" % sig for sig in err_sig))
return is_ok
def check_results():
is_ok = True
all_result_rows = run_sql("select * from aidRESULTS")
keyfunc = lambda x: x[1:]
duplicated = (d for d in (list(d) for k, d in groupby(sorted(all_result_rows, key=keyfunc), key=keyfunc)) if len(d) > 1)
for dd in duplicated:
is_ok = False
for d in dd:
print "%s %s %s %s" % d
print
clusters = {}
for rr in all_result_rows:
clusters[rr[0]] = clusters.get(rr[0], []) + [rr[3]]
faulty_clusters = dict((cid, len(recs) - len(set(recs)))
for cid, recs in clusters.items()
if not len(recs) == len(set(recs)))
if faulty_clusters:
is_ok = False
print "Recids NOT unique in clusters!"
print ("A total of %s clusters hold an average of %.2f duplicates" %
(len(faulty_clusters), (sum(faulty_clusters.values()) / float(len(faulty_clusters)))))
for c in faulty_clusters:
print "Name: %-20s Size: %4d Faulty: %2d" % (c, len(clusters[c]), faulty_clusters[c])
return is_ok
def check_claim_inspireid_contradiction():
iids10x = run_sql("select id from bib10x where tag = '100__i'")
iids70x = run_sql("select id from bib70x where tag = '700__i'")
refs10x = set(x[0] for x in run_sql("select id from bib10x where tag = '100__a'"))
refs70x = set(x[0] for x in run_sql("select id from bib70x where tag = '700__a'"))
if iids10x:
iids10x = list_2_SQL_str(iids10x, lambda x: str(x[0]))
iids10x = run_sql("select id_bibxxx, id_bibrec, field_number "
"from bibrec_bib10x "
"where id_bibxxx in %s"
% iids10x)
iids10x = ((row[0], [(ref, rec) for ref, rec in run_sql(
"select id_bibxxx, id_bibrec "
"from bibrec_bib10x "
"where id_bibrec = '%s' "
"and field_number = '%s'"
% row[1:])
if ref in refs10x])
for row in iids10x)
else:
iids10x = ()
if iids70x:
iids70x = list_2_SQL_str(iids70x, lambda x: str(x[0]))
iids70x = run_sql("select id_bibxxx, id_bibrec, field_number "
"from bibrec_bib70x "
"where id_bibxxx in %s"
% iids70x)
iids70x = ((row[0], [(ref, rec) for ref, rec in run_sql(
"select id_bibxxx, id_bibrec "
"from bibrec_bib70x "
"where id_bibrec = '%s' "
"and field_number = '%s'"
% (row[1:]))
if ref in refs70x])
for row in iids70x)
else:
iids70x = ()
# [(iids, [bibs])]
inspired = list(chain(((iid, list(set(('100', ) + bib for bib in bibs))) for iid, bibs in iids10x),
((iid, list(set(('700', ) + bib for bib in bibs))) for iid, bibs in iids70x)))
assert all(len(x[1]) == 1 for x in inspired)
inspired = ((k, map(itemgetter(0), map(itemgetter(1), d)))
for k, d in groupby(sorted(inspired, key=itemgetter(0)), key=itemgetter(0)))
# [(inspireid, [bibs])]
inspired = [([(run_sql("select personid "
"from aidPERSONIDPAPERS "
"where bibref_table = %s "
"and bibref_value = %s "
"and bibrec = %s "
"and flag = '2'"
, bib), bib)
for bib in cluster[1]], cluster[0])
for cluster in inspired]
# [([([pid], bibs)], inspireid)]
for cluster, iid in inspired:
pids = set(chain.from_iterable(imap(itemgetter(0), cluster)))
if len(pids) > 1:
print "InspireID: %s links the following papers:" % iid
print map(itemgetter(1), cluster)
print "More than one personid claimed them:"
print list(pids)
print
continue
if len(pids) == 0:
# not even one paper with this inspireid has been
# claimed, screw it
continue
pid = list(pids)[0][0]
# The last step is to check all non-claimed papers for being
# claimed by the person on some different signature.
problem = (run_sql("select bibref_table, bibref_value, bibrec "
"from aidPERSONIDPAPERS "
"where bibrec = %s "
"and personid = %s "
"and flag = %s"
, (bib[2], pid, 2))
for bib in (bib for lpid, bib in cluster if not lpid))
problem = list(chain.from_iterable(problem))
if problem:
print "A personid has claimed a paper from an inspireid cluster and a contradictory paper."
print "Personid %d" % pid
print "Inspireid cluster %s" % str(map(itemgetter(1), cluster))
print "Contradicting claims: %s" % str(problem)
print
def repair_personid():
'''
    This should make check_personid_papers() return True.
'''
pids = run_sql("select distinct personid from aidPERSONIDPAPERS")
lpids = len(pids)
for i, pid in enumerate((p[0] for p in pids)):
update_status(float(i) / lpids, "Checking per-pid...")
rows = run_sql("select bibrec, bibref_table, bibref_value, flag "
"from aidPERSONIDPAPERS where personid = %s", (pid,))
rows = ((k, list(d))
for k, d in groupby(sorted(rows, key=itemgetter(0)), itemgetter(0)))
for rec, sigs in rows:
if len(sigs) > 1:
claimed = [sig for sig in sigs if sig[3] > 1]
rejected = [sig for sig in sigs if sig[3] < -1]
if len(claimed) == 1:
sigs.remove(claimed[0])
elif len(claimed) == 0 and len(rejected) == 1:
sigs.remove(rejected[0])
for sig in set(sigs):
run_sql("delete from aidPERSONIDPAPERS "
"where personid = %s "
"and bibrec = %s "
"and bibref_table = %s "
"and bibref_value = %s "
"and flag = %s"
, (pid, sig[0], sig[1], sig[2], sig[3]))
update_status_final("Done with per-pid fixing.")
recs = run_sql("select distinct bibrec from aidPERSONIDPAPERS")
lrecs = len(recs)
for i, rec in enumerate((r[0] for r in recs)):
update_status(float(i) / lrecs, "Checking per-rec...")
rows = run_sql("select bibref_table, bibref_value, flag from aidPERSONIDPAPERS "
"where bibrec = %s", (rec,))
        keyfunc = itemgetter(slice(0, 2))
        rows = ((k, map(itemgetter(2), d)) for k, d in groupby(sorted(rows), keyfunc))
for bibref, flags in rows:
if len(flags) > 1:
claimed = sum(1 for f in flags if f > 1)
rejected = sum(1 for f in flags if f < -1)
if claimed == 1:
run_sql("delete from aidPERSONIDPAPERS "
"where bibrec = %s "
"and bibref_table = %s "
"and bibref_value = %s "
"and flag <> %s"
, (rec, bibref[0], bibref[1], 2))
elif claimed == 0 and rejected == 1:
run_sql("delete from aidPERSONIDPAPERS "
"where bibrec = %s "
"and bibref_table = %s "
"and bibref_value = %s "
"and flag <> %s"
, (rec, bibref[0], bibref[1], -2))
else:
run_sql("delete from aidPERSONIDPAPERS "
"where bibrec = %s "
"and bibref_table = %s "
"and bibref_value = %s"
, (rec, bibref[0], bibref[1]))
update_status_final("Done with per-rec fixing.")
    update_status(0., "Fixing wrong names...")
    wrong_names, number = get_wrong_names()
    for i, w in enumerate(wrong_names):
        update_status(float(i) / number, "Fixing wrong names...")
if w[2]:
run_sql("update aidPERSONIDPAPERS set name=%s where bibref_table=%s and bibref_value=%s",
(w[2], w[0], w[1]))
else:
run_sql("delete from aidPERSONIDPAPERS where bibref_table=%s and bibref_value=%s",
(w[0], w[1]))
no_rejs = frozenset(run_sql("select bibref_table, bibref_value, bibrec from aidPERSONIDPAPERS where flag <> -2"))
rejs = frozenset(run_sql("select bibref_table, bibref_value, bibrec from aidPERSONIDPAPERS where flag = -2"))
floating_rejs = rejs - no_rejs
update_personID_canonical_names(map(new_person_from_signature, floating_rejs))
update_status_final("Fixed all wrong names.")
update_status(0, "Checking missing canonical names...")
paper_pids = run_sql("select distinct personid from aidPERSONIDPAPERS")
cname_pids = run_sql("select distinct personid from aidPERSONIDDATA where tag='canonical_name'")
missing_cnames = list(set(p[0] for p in paper_pids) - set(p[0] for p in cname_pids))
npids = len(missing_cnames)
    for i, pid in enumerate(missing_cnames):
        update_status(i / float(npids), "Creating missing canonical names...")
update_personID_canonical_names([pid])
update_status_final("Done restoring canonical names.")
def get_all_bibrecs():
return [x[0] for x in run_sql("select distinct bibrec from aidPERSONIDPAPERS")]
def remove_all_bibrecs(bibrecs):
bibrecs_s = list_2_SQL_str(bibrecs)
run_sql("delete from aidPERSONIDPAPERS where bibrec in %s" % bibrecs_s)
def empty_results_table():
run_sql("TRUNCATE aidRESULTS")
def save_cluster(named_cluster):
name, cluster = named_cluster
for bib in cluster.bibs:
run_sql("INSERT INTO aidRESULTS "
"(personid, bibref_table, bibref_value, bibrec) "
"VALUES (%s, %s, %s, %s) "
, (name, str(bib[0]), bib[1], bib[2]))
def remove_result_cluster(name):
run_sql("DELETE FROM aidRESULTS "
"WHERE personid like '%s%%'"
% name)
def personid_name_from_signature(sig):
ret = run_sql("select personid, name "
"from aidPERSONIDPAPERS "
"where bibref_table = %s and bibref_value = %s and bibrec = %s "
"and flag > '-2'"
, sig)
assert len(ret) < 2, ret
return ret
def personid_from_signature(sig):
ret = run_sql("select personid, flag "
"from aidPERSONIDPAPERS "
"where bibref_table = %s and bibref_value = %s and bibrec = %s "
"and flag > '-2'"
, sig)
assert len(ret) < 2, ret
return ret
def in_results(name):
return run_sql("select count(*) "
"from aidRESULTS "
"where personid like %s"
, (name + '.0',))[0][0] > 0
def get_signature_info(sig):
ret = run_sql("select personid, flag "
"from aidPERSONIDPAPERS "
"where bibref_table = %s and bibref_value = %s and bibrec = %s "
"order by flag"
, sig)
return ret
def get_claimed_papers(pid):
return run_sql("select bibref_table, bibref_value, bibrec "
"from aidPERSONIDPAPERS "
"where personid = %s "
"and flag > %s",
(pid, 1))
def copy_personids():
run_sql("DROP TABLE IF EXISTS `aidPERSONIDDATA_copy`")
run_sql("CREATE TABLE `aidPERSONIDDATA_copy` ( "
"`personid` BIGINT( 16 ) UNSIGNED NOT NULL , "
"`tag` VARCHAR( 64 ) NOT NULL , "
"`data` VARCHAR( 256 ) NOT NULL , "
"`opt1` MEDIUMINT( 8 ) DEFAULT NULL , "
"`opt2` MEDIUMINT( 8 ) DEFAULT NULL , "
"`opt3` VARCHAR( 256 ) DEFAULT NULL , "
"KEY `personid-b` ( `personid` ) , "
"KEY `tag-b` ( `tag` ) , "
"KEY `data-b` ( `data` ) , "
"KEY `opt1` ( `opt1` ) "
") ENGINE = MYISAM DEFAULT CHARSET = utf8")
run_sql("INSERT INTO `aidPERSONIDDATA_copy` "
"SELECT * "
"FROM `aidPERSONIDDATA`")
run_sql("DROP TABLE IF EXISTS `aidPERSONIDPAPERS_copy`")
run_sql("CREATE TABLE `aidPERSONIDPAPERS_copy` ( "
"`personid` bigint( 16 ) unsigned NOT NULL , "
"`bibref_table` enum( '100', '700' ) NOT NULL , "
"`bibref_value` mediumint( 8 ) unsigned NOT NULL , "
"`bibrec` mediumint( 8 ) unsigned NOT NULL , "
"`name` varchar( 256 ) NOT NULL , "
"`flag` smallint( 2 ) NOT NULL DEFAULT '0', "
"`lcul` smallint( 2 ) NOT NULL DEFAULT '0', "
"`last_updated` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP , "
"KEY `personid-b` ( `personid` ) , "
"KEY `reftable-b` ( `bibref_table` ) , "
"KEY `refvalue-b` ( `bibref_value` ) , "
"KEY `rec-b` ( `bibrec` ) , "
"KEY `name-b` ( `name` ) , "
"KEY `timestamp-b` ( `last_updated` ) , "
"KEY `ptvrf-b` ( `personid` , `bibref_table` , `bibref_value` , `bibrec` , `flag` ) "
") ENGINE = MyISAM DEFAULT CHARSET = utf8")
run_sql("INSERT INTO `aidPERSONIDPAPERS_copy` "
"SELECT * "
"FROM `aidPERSONIDPAPERS")
def delete_empty_persons():
pp = run_sql("select personid from aidPERSONIDPAPERS")
pp = set(p[0] for p in pp)
pd = run_sql("select personid from aidPERSONIDDATA")
pd = set(p[0] for p in pd)
fpd = run_sql("select personid from aidPERSONIDDATA where tag <> 'canonical_name'")
fpd = set(p[0] for p in fpd)
to_delete = pd - (pp | fpd)
if to_delete:
run_sql("delete from aidPERSONIDDATA where personid in %s" % list_2_SQL_str(to_delete))
def restore_personids():
run_sql("TRUNCATE `aidPERSONIDDATA`")
run_sql("INSERT INTO `aidPERSONIDDATA` "
"SELECT * "
"FROM `aidPERSONIDDATA_copy`")
run_sql("TRUNCATE `aidPERSONIDPAPERS`")
run_sql("INSERT INTO `aidPERSONIDPAPERS` "
"SELECT * "
"FROM `aidPERSONIDPAPERS_copy")
def get_possible_personids_from_paperlist_old(bibrecreflist):
'''
@param bibrecreflist: list of bibrecref couples, (('100:123,123',),) or bibrecs (('123',),)
    Returns a list of pids and their connected bibrefs, ordered by the number of bibrefs per pid:
[ [['1'],['123:123.123','123:123.123']] , [['2'],['123:123.123']] ]
'''
pid_bibrecref_dict = {}
for b in bibrecreflist:
pids = []
try:
pids = run_sql("select personid from aidPERSONID "
"use index (`tdf-b`) where tag=%s and data=%s", ('paper', str(b[0])))
except (OperationalError, ProgrammingError):
pids = run_sql("select personid from aidPERSONID "
"where tag=%s and data=%s", ('paper', str(b[0])))
for pid in pids:
if pid[0] in pid_bibrecref_dict:
pid_bibrecref_dict[pid[0]].append(str(b[0]))
else:
pid_bibrecref_dict[pid[0]] = [str(b[0])]
pid_list = [[i, pid_bibrecref_dict[i]] for i in pid_bibrecref_dict]
    return sorted(pid_list, key=lambda k: len(k[1]), reverse=True)
def resolve_affiliation(ambiguous_aff_string):
"""
This is a method available in the context of author disambiguation in ADS
only. No other platform provides the db table used by this function.
@warning: to be used in an ADS context only.
@param ambiguous_aff_string: Ambiguous affiliation string
@type ambiguous_aff_string: str
@return: The normalized version of the name string as presented in the database
@rtype: str
"""
if not ambiguous_aff_string or not bconfig.CFG_ADS_SITE:
return "None"
aff_id = run_sql("select aff_id from ads_affiliations where affstring=%s", (ambiguous_aff_string,))
if aff_id:
return aff_id[0][0]
else:
return "None"
|
jrbl/invenio
|
modules/bibauthorid/lib/bibauthorid_dbinterface.py
|
Python
|
gpl-2.0
| 81,035
|
# pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.cmus import Module
class TestCmusModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self.songTemplate = """
status {status}
file /path/to/file
duration {duration}
position {position}
tag title {title}
tag artist {artist}
tag album {album}
tag tracknumber 1
tag date 1984
tag comment comment
"""
def tearDown(self):
mocks.teardown_test(self)
def test_read_song(self):
self.popen.mock.communicate.return_value = ("song", None)
self.module.update_all()
self.popen.assert_call("cmus-remote -Q")
def test_handle_runtimeerror(self):
self.popen.mock.communicate.side_effect = RuntimeError("error loading song")
self.module.update_all()
self.assertEquals(self.module.description(self.anyWidget), " - /")
def test_format(self):
self.popen.mock.communicate.return_value = (self.songTemplate.format(
artist="an artist", title="a title", duration="100", position="20",
album="an album", status="irrelevant"
), None)
self.module.update_all()
self.anyWidget.set("theme.width", 1000)
self.assertEquals(self.module.description(self.anyWidget),
"an artist - a title 00:20/01:40"
)
def test_scrollable_format(self):
self.popen.mock.communicate.return_value = (self.songTemplate.format(
artist="an artist", title="a title", duration="100", position="20",
album="an album", status="irrelevant"
), None)
self.module.update_all()
self.anyWidget.set("theme.width", 10)
self.assertEquals(self.module.description(self.anyWidget),
"an artist - a title 00:20/01:40"[:10]
)
def test_repeat(self):
self.popen.mock.communicate.return_value = ("set repeat false", None)
self.module.update_all()
self.assertTrue("repeat-off" in self.module.state(self.module.widget("cmus.repeat")))
self.popen.mock.communicate.return_value = ("set repeat true", None)
self.module.update_all()
self.assertTrue("repeat-on" in self.module.state(self.module.widget("cmus.repeat")))
def test_shuffle(self):
self.popen.mock.communicate.return_value = ("set shuffle false", None)
self.module.update_all()
self.assertTrue("shuffle-off" in self.module.state(self.module.widget("cmus.shuffle")))
self.popen.mock.communicate.return_value = ("set shuffle true", None)
self.module.update_all()
self.assertTrue("shuffle-on" in self.module.state(self.module.widget("cmus.shuffle")))
def test_prevnext(self):
self.assertTrue("prev" in self.module.state(self.module.widget("cmus.prev")))
self.assertTrue("next" in self.module.state(self.module.widget("cmus.next")))
def test_main(self):
self.popen.mock.communicate.return_value = ("status paused", None)
self.module.update_all()
self.assertTrue("paused" in self.module.state(self.module.widget("cmus.main")))
self.popen.mock.communicate.return_value = ("status playing", None)
self.module.update_all()
self.assertTrue("playing" in self.module.state(self.module.widget("cmus.main")))
self.popen.mock.communicate.return_value = ("status stopped", None)
self.module.update_all()
self.assertTrue("stopped" in self.module.state(self.module.widget("cmus.main")))
def test_widget(self):
self.assertEquals(len(self.module.widgets()), 5)
for idx, val in enumerate(["prev", "main", "next", "shuffle", "repeat"]):
self.assertEquals(self.module.widgets()[idx].name, "cmus.{}".format(val))
def test_interaction(self):
events = [
{"widget": "cmus.shuffle", "action": "cmus-remote -S"},
{"widget": "cmus.repeat", "action": "cmus-remote -R"},
{"widget": "cmus.next", "action": "cmus-remote -n"},
{"widget": "cmus.prev", "action": "cmus-remote -r"},
{"widget": "cmus.main", "action": "cmus-remote -u"},
]
for event in events:
mocks.mouseEvent(stdin=self.stdin, inp=self.input, module=self.module, instance=self.module.widget(event["widget"]).id, button=LEFT_MOUSE)
self.popen.assert_call(event["action"])
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
TechRunner2/i3-gaps-rice
|
.config/i3/bar/tests/modules/test_cmus.py
|
Python
|
gpl-2.0
| 4,535
|
import requests
from bs4 import BeautifulSoup
import re
from flask import current_app
from flask_mail import Mail, Message
from app import create_app
app = create_app('config')
mail = Mail(app)
msg = Message("Failed to add sites",
sender="leo@search.techarena51.com",
recipients=["leo.gonzalvez@gmail.com"])
subs = [ "programming", "python", "coding"]
for sub in subs:
headers = { 'user-agent': 'testing:v0.1 (by /u/12boy)' }
    site = requests.get("http://www.reddit.com/r/{}".format(sub), headers=headers)
soup = BeautifulSoup(site.text, 'html.parser')
#print (site.text)
for div in soup.find_all('div', class_=re.compile('thing')):
try:
score = int(div.div.contents[2].get_text())
if score > 10:
url = div.div.next_sibling.a.get('href')
tag = div.div.next_sibling.a.get_text()
content = ""
with app.app_context():
from app.sites.models import Sites
site=Sites(url, content, tag, reddit_score=score)
error = site.add(site)
                    if error is not None:
msg.body =" URL : {url}, tag: {tag}, score:{score}, html : {div}, ERROR = {error}".format(url=url,
tag=tag,
score=score,
div=div,
error=error
)
mail.send(msg)
continue
except ValueError:
continue
|
Leo-g/Flask-FullTextSearch
|
scrape_r_python.py
|
Python
|
gpl-2.0
| 2,134
|
from NSCP import Settings, Registry, Core, log, status, log_debug, log_error, sleep
from test_helper import BasicTest, TestResult, Callable, setup_singleton, install_testcases, init_testcases, shutdown_testcases
from types import *
from time import time
import random
import os
prefix = 'scheduler'
class SchedulerTest(BasicTest):
check_count = 0
results_count = 0
command_count = {}
sched_alias = 'test_sched_%s'%prefix
python_channel = 'test_sched_%s_py'%prefix
command = 'test_sched_%s'%prefix
sched_base_path = '/settings/%s'%sched_alias
def simple_check_handler(arguments):
instance = SchedulerTest.getInstance()
return instance.wrapped_simple_check_handler(arguments)
simple_check_handler = Callable(simple_check_handler)
def wrapped_simple_check_handler(self, arguments):
self.check_count = self.check_count + 1
if arguments:
if not arguments[0] in self.command_count:
self.command_count[arguments[0]] = 1
else:
self.command_count[arguments[0]] = self.command_count[arguments[0]] + 1
return (status.OK, arguments[0], '')
return (status.OK, 'pong', '')
def on_stress_handler(channel, source, command, code, message, perf):
instance = SchedulerTest.getInstance()
instance.wrapped_on_stress_handler(channel, source, command, code, message, perf)
on_stress_handler = Callable(on_stress_handler)
def wrapped_on_stress_handler(self, channel, source, command, code, message, perf):
self.results_count = self.results_count + 1
return None
def desc(self):
return 'Testcase for Scheduler'
def title(self):
return 'Test Scheduler'
def setup(self, plugin_id, prefix):
self.reg.simple_function(self.command, SchedulerTest.simple_check_handler, 'This is a simple noop command')
self.reg.simple_subscription(self.python_channel, SchedulerTest.on_stress_handler)
#self.core.reload('%s,delayed'%self.sched_alias)
def teardown(self):
self.conf.set_string(self.sched_base_path, 'threads', '0')
self.core.reload(self.sched_alias)
def check_one(self, result, key, min, max):
        result.assert_gt(self.command_count[key], min, 'check %s (%d) fired more than %d times'%(key, self.command_count[key], min))
        result.assert_lt(self.command_count[key], max, 'check %s (%d) fired fewer than %d times'%(key, self.command_count[key], max))
def run_test(self):
self.core.load_module('Scheduler', self.sched_alias)
result = TestResult()
start = time()
last_major = 0
elapsed = time()-start
while elapsed < 60:
if elapsed > 0:
log("testing scheduler %d%% (collected %d instance in %d seconds)"%(elapsed/60*100, self.results_count, elapsed))
sleep(2000)
elapsed = time()-start
        result.add_message(True, 'Summary: collected %d instances in %d seconds: %d/s'%(self.results_count, elapsed, self.results_count/elapsed))
self.check_one(result, "rand", 5, 10)
self.check_one(result, "1s", 55, 65)
self.check_one(result, "short", 10, 14)
self.check_one(result, "30s", 1, 3)
self.check_one(result, "explicit", 10, 14)
self.check_one(result, "10s", 5, 7)
return result
def install(self, arguments):
# Configure required modules
self.conf.set_string('/modules', 'pytest', 'PythonScript')
#self.conf.set_string('/modules', self.sched_alias, 'Scheduler')
self.conf.set_string('/modules', 'CheckSystem', 'enabled')
# Configure python
self.conf.set_string('/settings/pytest/scripts', 'test_stress', 'test_scheduler.py')
default_path = '%s/schedules/default'%self.sched_base_path
self.conf.set_string(default_path, 'channel', self.python_channel)
self.conf.set_string(default_path, 'command', "%s default"%self.command)
self.conf.set_string(default_path, 'interval', '5s')
self.conf.set_string(default_path, 'randomness', '0%')
self.conf.set_string('%s/schedules'%(self.sched_base_path), 'python_checker_d', "%s short"%self.command)
self.conf.set_string('%s/schedules/python_checker_e'%(self.sched_base_path), 'command', "%s explicit"%self.command)
#self.conf.set_string('%s/schedules/python_checker_i'%(self.sched_base_path), 'interval', '1s')
self.conf.set_string('%s/schedules/python_checker_1s'%(self.sched_base_path), 'command', "%s 1s"%self.command)
self.conf.set_string('%s/schedules/python_checker_1s'%(self.sched_base_path), 'interval', '1s')
self.conf.set_string('%s/schedules/python_checker_10s'%(self.sched_base_path), 'command', "%s 10s"%self.command)
self.conf.set_string('%s/schedules/python_checker_10s'%(self.sched_base_path), 'interval', '10s')
self.conf.set_string('%s/schedules/python_checker_30s'%(self.sched_base_path), 'command', "%s 30s"%self.command)
self.conf.set_string('%s/schedules/python_checker_30s'%(self.sched_base_path), 'interval', '30s')
self.conf.set_string('%s/schedules/python_checker_r10s'%(self.sched_base_path), 'command', "%s rand"%self.command)
self.conf.set_string('%s/schedules/python_checker_r10s'%(self.sched_base_path), 'interval', '10s')
self.conf.set_string('%s/schedules/python_checker_r10s'%(self.sched_base_path), 'randomness', '50%')
self.conf.save()
def uninstall(self):
None
def help(self):
None
def init(self, plugin_id, prefix):
self.reg = Registry.get(plugin_id)
self.conf = Settings.get(plugin_id)
self.core = Core.get(plugin_id)
None
def shutdown(self):
None
def require_boot(self):
return True
setup_singleton(SchedulerTest)
all_tests = [SchedulerTest]
def __main__(args):
install_testcases(all_tests)
def init(plugin_id, plugin_alias, script_alias):
init_testcases(plugin_id, plugin_alias, script_alias, all_tests)
def shutdown():
shutdown_testcases()
|
mickem/nscp
|
scripts/python/test_scheduler.py
|
Python
|
gpl-2.0
| 6,169
|
import numpy as np
class Node:
"""
    The node of the signal flow graph; implements calc_func(), which is called in the main loop of the graph.
    This is the class to subclass when building new signal processing effects.
When subclassing you simply:
1. create the needed [Obj]InWire and [Obj]OutWire, as attributes of the subclass
2. implement the processing function calc_func()
"""
def __init__(self, world):
"""
        Node constructor. Should always be called when subclassing, with super().__init__(world).
Here you instantiate all the needed data, and [Obj]InWire and [Obj]OutWire.
Parameters
----------
world : World
            an instance of the world (the current graph)
"""
self.world = world
self.in_wires = []
self.out_wires = []
def calc_func(self):
"""
This is the actual processing function. Implement this with the specific functionality.
"""
pass
class Group(Node):
"""
A Group of Nodes and a Node itself. Implements the composite design pattern for the Nodes.
Create the nodes and then simply append the output node[s] to the group.
Expose the inner Node's [Obj]In|OutWire through [Obj]In|OutWireAdaptor
"""
def __init__(self, world):
super().__init__(world)
self.nodesList = []
self.is_sorted = True
def append(self, node):
if node in self.nodesList:
print("Trying to append a node that is already in the group, doing nothing")
return
self.nodesList.append(node)
self.is_sorted = False
def calc_func(self):
if not self.is_sorted:
self.sort()
for n in self.nodesList:
n.calc_func()
def clear(self):
self.nodesList = []
self.is_sorted = True
def _topological_sort_util(self, v, connections, visited, stack):
visited[v] = True
adj = np.where(connections[v] == 1)[0]
for i in adj:
if not visited[i]:
self._topological_sort_util(i, connections, visited, stack)
stack.append(v)
def _topological_sort(self, connections):
stack = []
assert(connections.ndim == 2 and connections.shape[0] == connections.shape[1])
nnodes = connections.shape[0]
visited = [False for _ in range(nnodes)]
for v in range(nnodes):
if not visited[v]:
self._topological_sort_util(v, connections, visited, stack)
stack.reverse()
return stack
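    # Worked example (assumed adjacency): with three nodes where 0 feeds 1
    # and 1 feeds 2,
    #     connections = np.array([[0, 1, 0],
    #                             [0, 0, 1],
    #                             [0, 0, 0]])
    # the DFS appends 2, then 1, then 0, and the reversed stack [0, 1, 2]
    # lists every node before the nodes that depend on it.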
def _add_parents(self):
"""
Traverse the graph with depth first search.
Add all the nodes that are required by the nodes in self.nodesList.
Update self.nodesList at the end with all the nodes in the graph
"""
nl = self.nodesList
visited, stack = set(), nl
while stack:
inode = stack.pop()
if inode not in visited:
# print("visiting " + str(inode.__class__.__name__))
visited.add(inode)
for iw in inode.in_wires:
ow = iw.out_wire()
if(ow is not None and
(ow.parent not in stack) and
(ow.parent not in visited)):
# print("adding " + str(ow.parent.__class__.__name__))
stack.append(ow.parent)
self.nodesList = list(visited)
def sort(self):
self._add_parents() # add all the parents of the added nodes
nl = self.nodesList
nnodes = len(nl)
connections = np.zeros((nnodes, nnodes), dtype=int)
for i1 in range(nnodes):
for ow in nl[i1].out_wires:
for iw in ow.in_wires():
if iw.parent not in nl:
raise ValueError(str(iw.parent.__class__.__name__) +
" is not in the graph, although connected to the output of " +
str(ow.parent.__class__.__name__))
i2 = nl.index(iw.parent)
connections[i1, i2] = 1
sorted_list = self._topological_sort(connections)
self.nodesList = [self.nodesList[i] for i in sorted_list]
self.is_sorted = True
|
brunodigiorgi/pyAudioGraph
|
pyAudioGraph/AudioGraph.py
|
Python
|
gpl-2.0
| 4,400
|
# -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges (lcdjunges@yahoo.com.br), Clovis Peruchi Scotti (scotti@ieee.org),
# Guilherme Augusto Rutzen (rutzen@das.ufsc.br), Mathias Erdtmann (erdtmann@gmail.com) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti (scotti@ieee.org), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
# ----------------------------------------------------------------------
import gtk
from harpia.GladeWindow import GladeWindow
from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR
# i18n
import os
from harpia.utils.XMLUtils import XMLParser
import gettext
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
# ----------------------------------------------------------------------
class Properties(GladeWindow, S2iCommonProperties):
# ----------------------------------------------------------------------
def __init__(self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir + 'glade/or.ui'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'BackgroundColor',
'BorderColor',
'HelpView'
]
handlers = [
'on_cancel_clicked',
'on_or_confirm_clicked',
'on_BackColorButton_clicked',
'on_BorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
self.configure()
# load help text
# t_oS2iHelp = XMLParser(self.m_sDataDir + "help/or" + _("_en.help"))
# t_oTextBuffer = gtk.TextBuffer()
# t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent())))
# self.widgets['HelpView'].set_buffer(t_oTextBuffer)
# ----------------------------------------------------------------------
def __del__(self):
pass
# ----------------------------------------------------------------------
def on_or_confirm_clicked(self, *args):
self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor)
self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor)
self.widgets['Properties'].destroy()
# ----------------------------------------------------------------------
def getHelp(self):
return"Permite a operação lógica 'OU' entre as duas entradas. Para esse bloco há duas possibilidades.\nPrimeira: Executa a operação entre duas imagens ponto a ponto. \nSegunda: Executa a operação entre um valor constante e cada ponto da imagem."
# OrProperties = Properties()
# OrProperties.show( center=0 )
# ------------------------------------------------------------------------------
# Code generation
# ------------------------------------------------------------------------------
def generate(blockTemplate):
import harpia.gerador
blockTemplate.imagesIO = \
'IplImage * block' + blockTemplate.blockNumber + '_img_i1 = NULL;\n' + \
'IplImage * block' + blockTemplate.blockNumber + '_img_i2 = NULL;\n' + \
'IplImage * block' + blockTemplate.blockNumber + '_img_o1 = NULL;\n'
blockTemplate.functionCall = '\nif(block' + blockTemplate.blockNumber + '_img_i1){\n' + \
'block' + blockTemplate.blockNumber + '_img_o1 = cvCreateImage(cvSize(block' + blockTemplate.blockNumber + \
'_img_i1->width,block' + blockTemplate.blockNumber + '_img_i1->height),block' + blockTemplate.blockNumber + \
'_img_i1->depth,block' + blockTemplate.blockNumber + '_img_i1->nChannels);\n' + \
harpia.gerador.inputSizeComply(2, blockTemplate.blockNumber) + 'cvOr(block' + \
blockTemplate.blockNumber + '_img_i1, block' + blockTemplate.blockNumber + '_img_i2, block' + \
blockTemplate.blockNumber + '_img_o1,0);\n cvResetImageROI(block' + blockTemplate.blockNumber + '_img_o1);}\n'
blockTemplate.dealloc = 'cvReleaseImage(&block' + blockTemplate.blockNumber + '_img_o1);\n' + \
'cvReleaseImage(&block' + blockTemplate.blockNumber + '_img_i1);\n' + \
'cvReleaseImage(&block' + blockTemplate.blockNumber + '_img_i2);\n'
# ------------------------------------------------------------------------------
# Block Setup
# ------------------------------------------------------------------------------
def getBlock():
return {"Label": _("Or"),
"Path": {"Python": "Or",
"Glade": "glade/or.ui",
"Xml": "xml/or.xml"},
"Inputs": 2,
"Outputs": 1,
"Icon": "images/or.png",
"Color": "10:180:10:150",
"InTypes": {0: "HRP_IMAGE", 1: "HRP_IMAGE"},
"OutTypes": {0: "HRP_IMAGE"},
"Description": _("Logical OR operation between two images."),
"TreeGroup": _("Arithmetic and logical operations")
}
|
samuelfd/harpia
|
harpia/bpGUI/Or.py
|
Python
|
gpl-2.0
| 6,118
|
#!/usr/bin/python
from plotbridge.plot import Plot
import numpy as np
p = Plot('spiral', template='gnuplot_2d_with_direction',
overwrite=True)
p.set_width(300); p.set_height(300)
t = np.linspace(0, 10*np.pi, 101)
curve_in_complex_plane = np.exp(-t/10. + 1j*t)
p.add_trace(curve_in_complex_plane)
p.update(); p.run()
|
govenius/plotbridge
|
examples/gnuplot_with_direction/with_direction.py
|
Python
|
gpl-2.0
| 329
|
from starstoloves.lib.repository import RepositoryItem
class ConnectionHelper(RepositoryItem):
DISCONNECTED = 0
CONNECTED = 1
FAILED = 2
state = None
username = None
def __init__(self, username=None, state=None, **kwargs):
self.username = username
if state is None:
state = self.DISCONNECTED
self.state = state
super().__init__(**kwargs)
@property
def is_connected(self):
return self.state == self.CONNECTED
def disconnect(self):
self.repository.delete(self)
|
tdhooper/starstoloves
|
starstoloves/lib/connection/connection.py
|
Python
|
gpl-2.0
| 565
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2021: SCS Software
import bpy
import os
import subprocess
import shutil
from sys import platform
from io_scs_tools.utils.printout import lprint
from io_scs_tools.utils import get_scs_globals as _get_scs_globals
_KNOWN_PROJECT_BASES = ("base_vehicle", "base_share", "base")
def strip_sep(path):
"""Strips double path separators (slashes and backslashes) on the start and the end of the given path
:param path: path to strip separators from
:type path: str
:return: new stripped path
:rtype: str
"""
return path.strip("\\\\").strip("//")
def get_filename(path, with_ext=True):
"""Returns file name from given file, with or without extension.
It finds last "os.sep" inside string and returns tail.
:param path: path with file name
:type path: str
:param with_ext: return file name with extension or not
:type with_ext: bool
:return: file name with or without extension
:rtype: str
"""
# find last separator; prefer os.sep otherwise search for normal slash
last_sep = path.rfind(os.sep)
if last_sep < 0:
last_sep = path.rfind("/")
new_path = path[last_sep + 1:]
if not with_ext and new_path.rfind(".") > 0:
new_path = new_path[:new_path.rfind(".")]
return new_path
def repair_path(filepath):
"""Takes a Blender filepath and tries to make it a valid absolute path."""
if filepath != '':
# print('0 filepath:\n"%s"' % filepath)
filepath = bpy.path.abspath(filepath, start=None, library=None) # make the path absolute
# print('1 filepath:\n"%s"' % filepath)
filepath = os.path.normpath(filepath) # repair things like "...\dir\dir\..\dir\..."
# print('2 filepath:\n"%s"' % filepath)
return filepath
def relative_path(base_path, path):
"""Takes a base path and other path and returns the relative version of the second path
if possible, otherwise it returns the original one (absolute path)."""
repaired_base_path = repair_path(base_path)
# print('repaired_base_path:\n\t"%s"' % repaired_base_path)
repaired_path = repair_path(path)
# print('repaired_path:\n\t"%s"' % repaired_path)
if len(repaired_base_path) > 2:
# presuming that equality of first two chars means we are on same mount point
if startswith(repaired_path[:2], repaired_base_path[:2]):
rel_path = os.path.relpath(repaired_path, repaired_base_path).replace("\\", "/")
# print('rel_path:\n\t"%s"' % rel_path)
if not rel_path.startswith("//"):
rel_path = "//" + rel_path
return rel_path
else:
lprint("W Not possible to create a relative path! Returning absolute path (%s)", (repaired_path,))
return repaired_path
else:
lprint("W No base path specified! It's not possible to create a relative path! Returning absolute path (%s)", (repaired_path,))
return repaired_path
def get_possible_project_infixes(include_zero_infix=False, append_sep=False):
"""Gets possible project infixes in relation to currently selected SCS Project Path.
If the path ends with a "dlc_" prefixed directory, then the first infix is the parent dlc.
Then possible base prefixes are added (sibling and parent known bases).
If alternative bases are disabled no extra infixes are added except zero infix if requested.
:param include_zero_infix: should empty infix be included into the list
:type include_zero_infix: bool
:param append_sep: should we add final separator to the paths for easy path concatenation
:type append_sep: bool
:return: list of possible project infixes
:rtype: list[str]
"""
infixes = []
if include_zero_infix:
infixes.append("")
if not _get_scs_globals().use_alternative_bases:
return infixes
project_path = _get_scs_globals().scs_project_path
project_path_basename = os.path.basename(project_path)
final_sep = ""
if append_sep:
final_sep = os.sep
# dlc infixes
if project_path_basename.startswith("dlc_"):
infixes.append(str((os.pardir + os.sep) * 2) + project_path_basename + final_sep)
# base infixes
for known_base in _KNOWN_PROJECT_BASES:
infixes.extend((os.pardir + os.sep + known_base + final_sep, str((os.pardir + os.sep) * 2) + known_base + final_sep))
return infixes
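# Hedged example (paths are assumptions): with scs_project_path ending in
# "dlc_north" and alternative bases enabled,
# get_possible_project_infixes(include_zero_infix=True) yields roughly:
# ["", "../../dlc_north", "../base_vehicle", "../../base_vehicle",
# "../base_share", "../../base_share", "../base", "../../base"]
# (shown with forward slashes; the real values use os.pardir and os.sep).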
def get_abs_path(path_in, subdir_path='', is_dir=False, skip_mod_check=False):
"""Gets absolute path to the "SCS Project Base Path" if given path is relative (starts with: "//"),
otherwise original path is returned.
If the relative path exists and is valid, it returns the absolute path, otherwise None.
Optionally a subdir_path can be provided, which will be added to the 'SCS Project Base Path'.
If skipping of the mod check is not specified then the function will also try to look in two
parent base directories, in case the "SCS Project Base Path" is currently set to a mod/dlc package.
:param path_in: Absolute or relative path to current 'SCS Project Base path'
:type path_in: str
:param subdir_path: Additional subdirs can be provided, they will be added to the 'SCS Project Base Path'
:type subdir_path: str
:param is_dir: flag specifying if given path should be directory
:type is_dir: bool
:param skip_mod_check: flag specifying if check for dlc/mod should be skipped
:type skip_mod_check: bool
:return: Absolute path or None
:rtype: str
"""
# correct skip mod check switch if usage of alternative bases is switched off by user
skip_mod_check |= not _get_scs_globals().use_alternative_bases
root_path = _get_scs_globals().scs_project_path
if subdir_path != '':
root_path = os.path.join(root_path, subdir_path)
if path_in.startswith("//"):
if len(root_path) > 2:
result = os.path.join(root_path, path_in[2:])
else:
result = None
else:
result = path_in
# use subdir_path as last item, so that if file/dir not found we return correct abs path, not the last checked from parents dir
infixes = get_possible_project_infixes() + [subdir_path, ]
existance_check = os.path.isdir if is_dir else os.path.isfile
while infixes and result is not None and not existance_check(result) and not skip_mod_check:
result = get_abs_path(path_in, subdir_path=infixes.pop(0), is_dir=is_dir, skip_mod_check=True)
return result
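# Hedged usage sketch (paths are assumptions): with scs_project_path set to
# "D:/projects/dlc_north", a relative input resolves against the project base
# and falls back to sibling/parent bases when the file is missing there:
# get_abs_path("//material/road.tobj")
# -> "D:/projects/dlc_north/material/road.tobj" if that file exists,
# -> otherwise e.g. "D:/projects/dlc_north/../base/material/road.tobj".
# Inputs not starting with "//" are treated as absolute and returned unchanged.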
def get_abs_paths(filepath, is_dir=False, include_nonexist_alternative_bases=False, use_infixed_search=False):
"""Gets existing absolute paths to the "SCS Project Base Path" including searching for "base" folder
one and two levels higher in filesystem hierachy.
:param filepath: relative or absolute filepath
:type filepath: str
:param is_dir: flag specifying if given path should be directory
:type is_dir: bool
:param include_nonexist_alternative_bases: flag specifying if non-existing absolute filepaths from alternative bases should be included in the result
:type include_nonexist_alternative_bases: bool
:param use_infixed_search: search also for infixed filepaths? Meant for infixed library SII file searching eg. sign.dlc_north.sii
:type use_infixed_search: bool
:return: list of absolute paths or empty list if path not found
:rtype: list[str]
"""
abs_paths = {}
"""
Store paths in dictionary to easily filter out duplicated paths.
So make sure to use normalized paths as keys and actual paths as values which should be returned as result.
"""
existance_check = os.path.isdir if is_dir else os.path.isfile
for sub_dir in get_possible_project_infixes(include_zero_infix=True):
infixed_abs_path = get_abs_path(filepath, subdir_path=sub_dir, is_dir=is_dir, skip_mod_check=True)
# additionally search for infixed files (eg. sign.dlc_north.sii)
if use_infixed_search:
infixed_files = get_all_infixed_file_paths(infixed_abs_path)
else:
infixed_files = [infixed_abs_path]
for resulted_path in infixed_files:
# ignore not found paths
if resulted_path is None:
continue
# create normalized path to properly gather only unique paths
normalized_resulted_path = full_norm(resulted_path)
if (include_nonexist_alternative_bases or existance_check(resulted_path)) and normalized_resulted_path not in abs_paths:
abs_paths[normalized_resulted_path] = resulted_path
# we are returning de-normalized paths, as they might be used in printouts where precious
# information about the origin of the path would otherwise be lost (e.g. a library found in the
# parent "base" directory; returning the normalized path would hide that)
return abs_paths.values()
def is_valid_shader_texture_path(shader_texture):
"""It returns True if there is valid Shader Texture file, otherwise False.
:param shader_texture: SCS texture path, can be absolute or relative
:type shader_texture: str
:return: True if there is a valid Shader Texture file, otherwise False
:rtype: bool
"""
if shader_texture != "":
if shader_texture.startswith("//"): # RELATIVE PATH
shader_texture_abs_path = get_abs_path(shader_texture)
if shader_texture_abs_path and os.path.isfile(shader_texture_abs_path):
return True
else: # ABSOLUTE PATH
if os.path.isfile(shader_texture):
return True
return False
def is_valid_shader_presets_library_path():
"""It returns True if there is valid "*.txt" file in
the Shader Presets Library directory, otherwise False."""
scs_globals = _get_scs_globals()
# check if default presets path is valid
if not scs_globals.shader_presets_use_custom:
return get_shader_presets_filepath() != ""
# check if set custom preset path is valid
shader_presets_filepath = scs_globals.shader_presets_filepath
if shader_presets_filepath != "":
if shader_presets_filepath.startswith("//"): # RELATIVE PATH
shader_presets_abs_path = get_abs_path(shader_presets_filepath)
if shader_presets_abs_path:
if os.path.isfile(shader_presets_abs_path):
return True
else: # ABSOLUTE PATH
if os.path.isfile(shader_presets_filepath):
return True
return False
def is_valid_trigger_actions_rel_path():
"""It returns True if there is valid "*.sii" file in
the Trigger Actions directory, otherwise False."""
trig_actions_abs_path = get_abs_path(_get_scs_globals().trigger_actions_rel_path)
if trig_actions_abs_path:
if os.path.isfile(trig_actions_abs_path):
return True
else:
return False
else:
return False
def is_valid_sign_library_rel_path():
"""It returns True if there is valid "*.sii" file in
the Sign Library directory, otherwise False."""
sign_library_abs_path = get_abs_path(_get_scs_globals().sign_library_rel_path)
if sign_library_abs_path:
if os.path.isfile(sign_library_abs_path):
return True
else:
return False
else:
return False
def is_valid_tsem_library_rel_path():
"""It returns True if there is valid "*.sii" file in
the Traffic Semaphore Profile Library directory, otherwise False."""
tsem_library_abs_path = get_abs_path(_get_scs_globals().tsem_library_rel_path)
if tsem_library_abs_path:
if os.path.isfile(tsem_library_abs_path):
return True
else:
return False
else:
return False
def is_valid_traffic_rules_library_rel_path():
"""It returns True if there is valid "*.sii" file in
the Traffic Rules Library directory, otherwise False."""
traffic_rules_library_abs_path = get_abs_path(_get_scs_globals().traffic_rules_library_rel_path)
if traffic_rules_library_abs_path:
if os.path.isfile(traffic_rules_library_abs_path):
return True
else:
return False
else:
return False
def is_valid_hookup_library_rel_path():
"""It returns True if there is at least one "*.sii" file in
the resulting unit hookup directory or its sub-directories, otherwise False."""
hookup_library_abs_path = get_abs_path(_get_scs_globals().hookup_library_rel_path, is_dir=True)
if hookup_library_abs_path:
for root, dirs, files in os.walk(hookup_library_abs_path):
for file in files:
if file.endswith(".sii"):
return True
return False
else:
return False
def is_valid_matsubs_library_rel_path():
"""It returns True if there is valid "*.db" file in
the Material Substance Library directory, otherwise False."""
matsubs_library_abs_path = get_abs_path(_get_scs_globals().matsubs_library_rel_path)
# print(' matsubs_library_abs_path: %r' % str(matsubs_library_abs_path))
if matsubs_library_abs_path:
if os.path.isfile(matsubs_library_abs_path):
return True
else:
return False
else:
return False
def is_valid_sun_profiles_library_path():
"""It returns True if there is valid "*.sii" file in
the Sun Profiles Library directory, otherwise False."""
sun_profiles_lib_path = get_abs_path(_get_scs_globals().sun_profiles_lib_path)
if sun_profiles_lib_path:
if os.path.isfile(sun_profiles_lib_path):
return True
else:
return False
else:
return False
def get_addon_installation_paths():
"""Returns a list of paths to the directories where the addon can be installed."""
script_paths = bpy.utils.script_paths()
addon_dirs = ('addons', 'addons_contrib')
script_locations = []
for script_path in script_paths:
for addon_dir in addon_dirs:
script_locations.append(os.path.join(script_path, addon_dir, 'io_scs_tools'))
scs_installation_dirs = []
for location in script_locations:
if os.path.isdir(location):
scs_installation_dirs.append(location)
if len(scs_installation_dirs) == 0:
lprint('''\n\nE The installation directory of "SCS Blender Tools" couldn't be detected! (Shouldn't happen!)\n''')
elif len(scs_installation_dirs) > 1:
lprint('\n\nW More than one installation of "SCS Blender Tools" detected!\n\t Please remove the redundant installations so that only one '
'remains.\n')
return scs_installation_dirs
def get_shader_presets_filepath():
"""Returns a valid filepath to "shader_presets.txt" file. If the file doesn't exists,
the empty string is returned and Shader Presets won't be available."""
scs_installation_dirs = get_addon_installation_paths()
shader_presets_file = ''
for location in scs_installation_dirs:
test_path = os.path.join(location, 'shader_presets.txt')
if os.path.isfile(test_path):
shader_presets_file = test_path
break
return shader_presets_file
def get_texture_path_from_tobj(tobj_filepath, raw_value=False):
"""Get absolute path of texture from given tobj filepath.
If raw value is requested returned path is direct value written in TOBJ.
NOTE: there is no safety check if file exists.
:param tobj_filepath: absolute tobj file path
:type tobj_filepath: str
:param raw_value: flag for indicating if texture path shall be returned as it's written in TOBJ
:type raw_value: bool
:return: absolute texture file path if found or None
:rtype: str | None
"""
texture_paths = get_texture_paths_from_tobj(tobj_filepath, raw_value=raw_value, first_only=True)
if not texture_paths:
return None
return texture_paths[0]
def get_texture_paths_from_tobj(tobj_filepath, raw_value=False, first_only=False):
"""Get absolute path(s) of textures from given tobj filepath.
If raw value is requested returned path(s) are direct values written in TOBJ.
If first only is requested only first path is returned.
:param tobj_filepath: absolute tobj file path
:type tobj_filepath: str
:param raw_value: flag for indicating if texture path(s) shall be returned as it's written in TOBJ
:type raw_value: bool
:param first_only: flag for requesting only first entry from TOBJ map names (only first texture)
:type first_only: bool
:return: absolute texture file path(s) if found or None
:rtype: tuple[str] | None
"""
from io_scs_tools.internals.containers.tobj import TobjContainer
container = TobjContainer.read_data_from_file(tobj_filepath, skip_validation=raw_value)
tobj_dir, tobj_filename = os.path.split(tobj_filepath)
if container is None:
return None
if raw_value:
if first_only:
return container.map_names[0],
return tuple(container.map_names)
abs_texture_paths = []
for map_name in container.map_names:
if map_name[0] == "/":
curr_abs_tobj_path = get_abs_path("//" + map_name[1:])
else:
curr_abs_tobj_path = os.path.join(tobj_dir, map_name)
# directly intercept and return first texture path
if first_only:
return curr_abs_tobj_path,
abs_texture_paths.append(curr_abs_tobj_path)
return tuple(abs_texture_paths)
def get_texture_extens_and_strip_path(texture_path):
"""Gets all supported texture extensions and strips given input path for any of it.
:param texture_path: shader texture raw path value
:type texture_path: str
:return: list of extensions and stripped path as tuple
:rtype: tuple[list[str], str]
"""
extensions = [".tobj", ".tga", ".png"]
# strip off any extensions ( endswith is the most secure, because of possible multiple extensions )
if texture_path.endswith(".tobj"):
extensions.insert(0, texture_path[-5:])
texture_path = texture_path[:-5]
elif texture_path.endswith(".tga") or texture_path.endswith(".png"):
extensions.insert(0, texture_path[-4:])
texture_path = texture_path[:-4]
return extensions, texture_path
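# Hedged example: the matched extension is inserted at the front of the default
# list, so it can appear twice in the result:
# get_texture_extens_and_strip_path("/material/env/reflection.tobj")
# -> ([".tobj", ".tobj", ".tga", ".png"], "/material/env/reflection")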
def get_scs_texture_str(texture_string):
"""Get texture string as presented in SCS files: "/material/environment/vehicle_reflection"
without any file extensions. Input path can also have texture object extension or supported images extensions.
Path will be searched and returned in this order:
1. relative path on current SCS Project Base Path
2. relative path on parent base dirs of current SCS Project Base Path in the case of mod/dlc
3. find absolute file path
4. return unchanged texture string path
:param texture_string: texture string for which texture should be found e.g.: "/material/environment/vehicle_reflection" or absolute path
:type texture_string: str
:return: relative path to texture object or absolute path to texture object or unchanged texture string
:rtype: str
"""
scs_project_path = _get_scs_globals().scs_project_path
orig_texture_string = texture_string
# remove any directory separators left overs from different platform
texture_string = texture_string.replace("/", os.sep).replace("\\", os.sep)
extensions, texture_string = get_texture_extens_and_strip_path(texture_string)
# if texture string starts with scs project path we can directly strip of project path
if startswith(texture_string, scs_project_path):
texture_string = texture_string[len(scs_project_path):]
else: # check if texture string came from base project while scs project path is in dlc/mod folder
# first find longest matching path
try:
common_path_len = len(os.path.commonpath([scs_project_path, texture_string]))
except ValueError: # if ValueError is raised then paths do not match for sure, thus set it to 0
common_path_len = 0
nonmatched_path_part = texture_string[common_path_len + 1:]
if nonmatched_path_part.startswith("base" + os.sep) or nonmatched_path_part.startswith("base_") or nonmatched_path_part.startswith("dlc_"):
# now check if provided texture string is the same as:
# current scs project path + one or two directories up + the non-matched part of the path
# NOTE: the number of find calls is inverse to the number of parent dirs
for infix, find_calls_count in (("..", 2), (".." + os.sep + "..", 1)):
modif_texture_string = os.path.join(scs_project_path, infix + os.sep + nonmatched_path_part)
# we got a hit if one or two directories up is the same path as texture string
if is_samepath(modif_texture_string, texture_string):
slash_idx = 0
for _ in range(0, find_calls_count):
slash_idx = nonmatched_path_part.find(os.sep, slash_idx)
# catch invalid cases that need investigation
assert slash_idx != -1
texture_string = nonmatched_path_part[slash_idx:]
# check for relative TOBJ, TGA, PNG
for ext in extensions:
texture_path = get_abs_path("//" + texture_string.strip(os.sep) + ext)
if texture_path and os.path.isfile(texture_path):
return "//" + texture_string.replace(os.sep, "/").strip("/") + ext
# check for absolute TOBJ, TGA, PNG
for ext in extensions:
texture_path = get_abs_path(texture_string + ext, skip_mod_check=True)
if texture_path and os.path.isfile(texture_path):
return texture_string.replace(os.sep, "/") + ext
return orig_texture_string
def get_tobj_path_from_shader_texture(shader_texture, check_existance=True):
"""Gets TOBJ path from shader texture value if exists, otherwise returning None.
:param shader_texture: shader texture raw path value
:type shader_texture: str
:param check_existance: flag indicating if the tobj path should also be checked for existence
:type check_existance: bool
:return: TOBJ absolute path or None if not found
:rtype: str | None
"""
# strip off any extensions ( endswith is the most secure, because of possible multiple extensions )
if shader_texture.endswith(".tobj"):
tobj_filpath = shader_texture
elif shader_texture.endswith(".tga") or shader_texture.endswith(".png"):
tobj_filpath = shader_texture[:-4] + ".tobj"
else:
tobj_filpath = shader_texture + ".tobj"
# NOTE: if there is no existence check then we also shouldn't check for mods file system structure
tobj_filpath = get_abs_path(tobj_filpath, skip_mod_check=not check_existance)
if not check_existance or (tobj_filpath and os.path.isfile(tobj_filpath)):
return tobj_filpath
return None
def get_skeleton_relative_filepath(armature, directory, default_name):
"""Get's skeleton relative path to given directory. This path can be used for linking
skeletons in PIM and PIA files.
:param armature: armature object which will be used as scs skeleton
:type armature: bpy.types.Object
:param directory: directory from which relative path of skeleton should be gotten
:type directory: str
:param default_name: if custom path is empty this name will be used as the name of pis file
:type default_name: str
:return: relative path to predicted PIS file of given armature
:rtype: str
"""
skeleton_custom_dir = armature.scs_props.scs_skeleton_custom_export_dirpath
skeleton_custom_name = armature.scs_props.scs_skeleton_custom_name
skeleton_path = ""
if skeleton_custom_dir != "":
if skeleton_custom_dir.startswith("//"):
skeleton_path = os.path.relpath(os.path.join(_get_scs_globals().scs_project_path, skeleton_custom_dir[2:]), directory)
# if the custom skeleton path and the default skeleton path are the same, relpath will result in ".",
# now if we use that in returning join function, then our skeleton path will look like "./skeleton.pis" which is not correct.
# So instead just reset skeleton path to empty string and join will return only skeleton name as it should.
if skeleton_path == ".":
skeleton_path = ""
else:
lprint("E Custom skeleton export path is not relative to SCS Project Base Path.\n\t " +
"Custom path will be ignored, which might lead to wrongly linked skeleton file inside PIM and PIA files.")
skeleton_name = (skeleton_custom_name if skeleton_custom_name != "" else default_name) + ".pis"
return os.path.join(skeleton_path, skeleton_name)
def get_animations_relative_filepath(scs_root, directory):
"""Get's skeleton relative path to given directory. This path can be used for linking
skeletons in PIM and PIA files.
:param scs_root: scs root object of this animation
:type scs_root: bpy.types.Object
:param directory: directory from which relative path of animaton should be gotten
:type directory: str
:return: relative path to the custom animation export directory; empty string if unset, None if the custom path is not relative
:rtype: str | None
"""
anims_path = ""
if scs_root.scs_props.scs_root_object_allow_anim_custom_path:
animations_custom_dir = scs_root.scs_props.scs_root_object_anim_export_filepath
if animations_custom_dir != "":
if animations_custom_dir.startswith("//"):
anims_path = os.path.relpath(os.path.join(_get_scs_globals().scs_project_path, animations_custom_dir[2:]), directory)
else:
return None
return anims_path
def get_global_export_path():
"""Gets global export path.
If default export path is empty and blend file is saved inside current scs project path -> return blend file dir;
Otherwise return scs project path combined with default export path.
:return: global export path defined by directory of saved blend file and default export path from settings
:rtype: str
"""
scs_project_path = _get_scs_globals().scs_project_path
is_blend_file_within_base = bpy.data.filepath != "" and startswith(bpy.data.filepath, scs_project_path)
default_export_path = bpy.context.scene.scs_props.default_export_filepath
# if not set try to use Blender filepath
if default_export_path == "" and is_blend_file_within_base:
return os.path.dirname(bpy.data.filepath)
else:
return os.path.join(scs_project_path, default_export_path.strip("//"))
def get_custom_scs_root_export_path(root_object):
"""Gets custom export file path for given SCS Root Object.
If custom export path is empty and blend file is saved inside current scs project path -> return blend file dir;
Otherwise return scs project path combined with custom scs root export path.
:param root_object: scs root object
:type root_object: bpy.types.Object
:return: custom export directory path of given SCS Root Object; None if custom export for SCS Root is disabled
:rtype: str | None
"""
scs_project_path = _get_scs_globals().scs_project_path
is_blend_file_within_base = bpy.data.filepath != "" and startswith(bpy.data.filepath, scs_project_path)
custom_filepath = None
if root_object.scs_props.scs_root_object_allow_custom_path:
scs_root_export_path = root_object.scs_props.scs_root_object_export_filepath
# if not set try to use Blender filepath
if scs_root_export_path == "" and is_blend_file_within_base:
custom_filepath = os.path.dirname(bpy.data.filepath)
else:
custom_filepath = os.path.join(scs_project_path, scs_root_export_path.strip("//"))
return custom_filepath
def get_all_infixed_file_paths(filepath, include_given_path=True):
"""Gets files from same directory using any infixed word without,
however dot can not appear in infix.
:param filepath: absolute filepath which shall be checked for any infixed files
:type filepath: str
:param include_given_path: if True given file path will be included in returning list otherwise no
:type include_given_path: bool
:return: list of all infixed files; optionally given filepath can be added to result list too
:rtype: list[str]
"""
# infixed file paths can not be searched for a None path, so return empty list
if filepath is None:
return []
infixed_filepaths = [filepath] if include_given_path else []
orig_dir, orig_file = os.path.split(filepath)
# if original directory doesn't exist skip searching for any infix files
if not os.path.isdir(orig_dir):
return infixed_filepaths
last_ext_i = orig_file.rfind(".")
orig_file_prefix = orig_file[:last_ext_i]
orig_file_postfix = orig_file[last_ext_i:]
for file in os.listdir(orig_dir):
# the given file itself was already handled via include_given_path, so skip it here
if file == orig_file:
continue
if file.startswith(orig_file_prefix) and file.endswith(orig_file_postfix) and file.count(".") == 2:
infixed_filepaths.append(os.path.join(orig_dir, file))
return infixed_filepaths
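# Hedged example (file names are assumptions): for ".../def/world/sign.sii" the
# function also picks up single-infix siblings such as
# ".../def/world/sign.dlc_north.sii" (exactly two dots in the name), but skips
# ".../def/world/sign.a.b.sii" and files with a different extension.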
def get_projects_paths(game_project_path):
"""Gets list of all projects inside givem game project path.
NOTE: function is not checking wether given path is real game project or not,
rather it just searches for mod and dlc projects.
:param game_project_path: directory where game repo is located
:type game_project_path: str
:return: paths of all mod and dlc projects found in game project
:rtype: list[str]
"""
project_paths = []
for dir_entry in os.listdir(game_project_path):
# projects can not be files so ignore them
if os.path.isfile(os.path.join(game_project_path, dir_entry)):
continue
if dir_entry == "base" or dir_entry.startswith("base_") or dir_entry.startswith("dlc_"):
project_paths.append(readable_norm(os.path.join(game_project_path, dir_entry)))
elif dir_entry.startswith("mod_"):
mod_dir = os.path.join(game_project_path, dir_entry)
for dir_entry2 in os.listdir(mod_dir):
# projects can not be files so ignore them
if os.path.isfile(os.path.join(mod_dir, dir_entry2)):
continue
if dir_entry2 == "base" or dir_entry2.startswith("base_") or dir_entry2.startswith("dlc_"):
project_paths.append(readable_norm(os.path.join(mod_dir, dir_entry2)))
return project_paths
def startswith(path1, path2):
"""Checks if first given path starts with second given path.
It also takes into account windows drive letter which can be big or small.
:param path1: first path
:type path1: str
:param path2: second path
:type path2: str
:return: True if path1 starts with path2; False otherwise
:rtype: bool
"""
norm_path1 = full_norm(path1)
norm_path2 = full_norm(path2)
return norm_path1.startswith(norm_path2)
def is_samepath(path1, path2):
"""Checks if paths are the same
It also takes into account windows drive letter which can be big or small.
:param path1: first path
:type path1: str
:param path2: second path
:type path2: str
:return: True if the paths are the same; False otherwise
:rtype: bool
"""
norm_path1 = full_norm(path1)
norm_path2 = full_norm(path2)
return norm_path1 == norm_path2
def full_norm(path1):
"""Normalize path.
It also takes into account windows drive letter which can be big or small.
:param path1: path
:type path1: str
:return: normalized path
:rtype: str
"""
norm_path1 = os.path.normpath(path1)
norm_path1 = os.path.normcase(norm_path1)
return norm_path1
def readable_norm(path):
"""Normalize path in nice human readable form.
On windows it also converts backslashes to forward ones, to have cross platform output.
:param path: path to normalize
:type path: str
:return: normalized path
:rtype: str
"""
norm_path = os.path.normpath(path)
norm_path = norm_path.replace("\\", "/")
return norm_path
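# Hedged example (Windows-style input assumed):
# full_norm("C:\\Base\\..\\base\\map") -> "c:\\base\\map" (case-folded, for comparisons)
# readable_norm("C:\\Base\\..\\base\\map") -> "C:/base/map" (kept readable, for printouts)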
def ensure_symlink(src, dest):
"""Ensures symbolic link from source to destination. On Windows junction links are used
to avoid problems with link creation rights.
:param src: directory or file path from which should be taken as source for creation of symbolic link
:type src: str
:param dest: directory or file path where symbolic link should be written
:type dest: str
"""
if os.path.isdir(dest):
os.remove(dest)  # use os.remove instead of os.unlink, as we can't remove a mklink junction with os.unlink.
if platform == "win32":
subprocess.check_call(["mklink", "/J", dest, src], shell=True)
else:
os.symlink(src, dest)
def rmtree(src):
"""Remove directory or file. In case of directory all the content inside will also be removed.
:param src: source path which should be recursively removed
:type src: str
"""
try:
shutil.rmtree(src)
except shutil.Error:
lprint("E Problem removing directory: %r", (readable_norm(src),))
def copytree(src, dest):
"""Recursively copy whole tree of given source path to destination path.
If directories don't exist they will be created.
If directories/files exist then their content will be overwritten.
:param src: source path to copy from
:type src: str
:param dest: destination path to copy to
:type dest: str
"""
for root, dirs, files in os.walk(src):
if not os.path.isdir(root):
os.makedirs(root)
for file in files:
rel_path = root.replace(src, '').lstrip(os.sep)
dest_path = os.path.join(dest, rel_path)
if not os.path.isdir(dest_path):
os.makedirs(dest_path)
shutil.copyfile(os.path.join(root, file), os.path.join(dest_path, file))
def get_tree_size(src):
"""Return total size of files in given path and subdirs.
:param src: source path to get size from
:type src: str
"""
total = 0
if not os.path.isdir(src) and not os.path.isfile(src):
return total
for entry in os.scandir(src):
if entry.is_dir(follow_symlinks=False):
total += get_tree_size(entry.path)
else:
total += entry.stat(follow_symlinks=False).st_size
return total
|
SCSSoftware/BlenderTools
|
addon/io_scs_tools/utils/path.py
|
Python
|
gpl-2.0
| 35,485
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetShippingCosts
# Retrieves shipping costs for an item.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetShippingCosts(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetShippingCosts Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetShippingCosts, self).__init__(temboo_session, '/Library/eBay/Shopping/GetShippingCosts')
def new_input_set(self):
return GetShippingCostsInputSet()
def _make_result_set(self, result, path):
return GetShippingCostsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetShippingCostsChoreographyExecution(session, exec_id, path)
class GetShippingCostsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetShippingCosts
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AppID(self, value):
"""
Set the value of the AppID input for this Choreo. ((required, string) The unique identifier for the application.)
"""
super(GetShippingCostsInputSet, self)._set_input('AppID', value)
def set_DestinationCountryCode(self, value):
"""
Set the value of the DestinationCountryCode input for this Choreo. ((conditional, string) The shipment destination country code.)
"""
super(GetShippingCostsInputSet, self)._set_input('DestinationCountryCode', value)
def set_DestinationPostalCode(self, value):
"""
Set the value of the DestinationPostalCode input for this Choreo. ((conditional, string) The shipment destination postal code.)
"""
super(GetShippingCostsInputSet, self)._set_input('DestinationPostalCode', value)
def set_IncludeDetails(self, value):
"""
Set the value of the IncludeDetails input for this Choreo. ((conditional, boolean) Indicates whether to return the ShippingDetails container in the response.)
"""
super(GetShippingCostsInputSet, self)._set_input('IncludeDetails', value)
def set_ItemID(self, value):
"""
Set the value of the ItemID input for this Choreo. ((required, string) The ID of the item to get shipping costs for.)
"""
super(GetShippingCostsInputSet, self)._set_input('ItemID', value)
def set_QuantitySold(self, value):
"""
Set the value of the QuantitySold input for this Choreo. ((optional, string) The quantity of items being shipped.)
"""
super(GetShippingCostsInputSet, self)._set_input('QuantitySold', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(GetShippingCostsInputSet, self)._set_input('ResponseFormat', value)
def set_SandboxMode(self, value):
"""
Set the value of the SandboxMode input for this Choreo. ((optional, boolean) Indicates that the request should be made to the sandbox endpoint instead of the production endpoint. Set to 1 to enable sandbox mode.)
"""
super(GetShippingCostsInputSet, self)._set_input('SandboxMode', value)
def set_SiteID(self, value):
"""
Set the value of the SiteID input for this Choreo. ((optional, string) The eBay site ID that you want to access. Defaults to 0 indicating the US site.)
"""
super(GetShippingCostsInputSet, self)._set_input('SiteID', value)
class GetShippingCostsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetShippingCosts Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from eBay.)
"""
return self._output.get('Response', None)
class GetShippingCostsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetShippingCostsResultSet(response, path)
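# Hedged usage sketch (credentials and the item ID are placeholders; the call
# pattern follows the standard Temboo SDK flow):
# from temboo.core.session import TembooSession
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = GetShippingCosts(session)
# inputs = choreo.new_input_set()
# inputs.set_AppID('YOUR_EBAY_APP_ID')
# inputs.set_ItemID('ITEM_ID')
# inputs.set_DestinationPostalCode('95112')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())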
|
willprice/arduino-sphere-project
|
scripts/example_direction_finder/temboo/Library/eBay/Shopping/GetShippingCosts.py
|
Python
|
gpl-2.0
| 5,348
|
__author__ = 'williewonka'
import openpyxl
wb = openpyxl.load_workbook("json/jaartallen.xlsx")
sheet = wb.get_active_sheet()
wb_export = openpyxl.Workbook()
sheet_export = wb_export.get_active_sheet()
for cell in sheet.columns[1]:
if cell.value is None:
continue
jaar = 2014
# try:
for entry in cell.value.split(" "):
try:
jaartal = int(entry.strip(";"))
except ValueError:  # skip tokens that are not plain integers
continue
# jaartal = int(entry.split(" ").pop())
if jaar > jaartal > 1900:
jaar = jaartal
# except:
# continue
sheet_export.cell(row=cell.row,column=0).value = sheet.cell(row=cell.row,column=0).value
sheet_export.cell(row=cell.row,column=1).value = jaar
wb_export.save("json/jaartallen_opgeschoont.xlsx")
|
williewonka/USE-patents-project
|
jaartal_extractor.py
|
Python
|
gpl-2.0
| 783
|
#!/usr/bin/python
###############################################################################
# NAME: pyp_io.py
# VERSION: 2.0.0 (29SEPTEMBER2010)
# AUTHOR: John B. Cole, PhD (john.cole@ars.usda.gov)
# LICENSE: LGPL
###############################################################################
# FUNCTIONS:
# a_inverse_from_file()
# a_inverse_to_file()
# dissertation_pedigree_to_file()
# dissertation_pedigree_to_pedig_format()
# dissertation_pedigree_to_pedig_interest_format()
# dissertation_pedigree_to_pedig_format_mask()
# pyp_file_header()
# pyp_file_footer()
# renderTitle()
# renderBodyText()
# pickle_pedigree()
# unpickle_pedigree()
# summary_inbreeding()
# save_ijk()
# load_from_gedcom()
# save_from_gedcom()
# save_to_gedcom()
# load_from_genes()
# save_from_genes()
# save_to_genes()
# save_newanimals_to_file()
###############################################################################
## @package pyp_io
# pyp_io contains several procedures for writing structures to and reading them from
# disc (e.g. using pickle() to store and retrieve A and A-inverse). It also includes a set
# of functions used to render strings as HTML or plaintext for use in generating output
# files.
##
import logging, numpy, pickle, string, time
import pyp_utils
global LINE1
global LINE2
LINE1 = '%s' % ('='*80)
LINE2 = '%s' % ('-'*80)
##
# a_inverse_to_file() uses the Python pickle system for persistent objects to write the
# inverse of a relationship matrix to a file.
# @param pedobj A PyPedal pedigree object.
# @param ainv The inverse of a numerator relationship matrix, A, or an empty string if A is to be calculated.
# @retval True (1) on success, false (0) on failure
def a_inverse_to_file(pedobj, ainv=''):
"""
Use the Python pickle system for persistent objects to write the inverse of a relationship matrix to a file.
"""
try: logging.info('Entered a_inverse_to_file()')
except: pass
try:
from pickle import Pickler
if not ainv:
ainv = a_inverse_df(pedobj.pedigree,pedobj.kw['filetag'])
a_outputfile = '%s%s%s' % (pedobj.kw['filetag'],'_a_inverse_pickled_','.pkl')
aout = open(a_outputfile,'w')
ap = pickle.Pickler(aout)
ap.dump(ainv)
aout.close()
_r = 1
except:
_r = 0
try: logging.info('Exited a_inverse_to_file()')
except: pass
return _r
##
# a_inverse_from_file() uses the Python pickle system for persistent objects to read the inverse of
# a relationship matrix from a file.
# @param inputfile The name of the input file.
# @retval The inverse of a numerator relationship matrix.
def a_inverse_from_file(inputfile):
"""
Use the Python pickle system for persistent objects to read the inverse of a relationship matrix from a file.
"""
try: logging.info('Entered a_inverse_from_file()')
except: pass
try:
from pickle import Pickler
ain = open(inputfile,'r')
au = pickle.Unpickler(ain)
a_inv = au.load()
except:
a_inv = numpy.zeros([1,1],'d')
try: logging.info('Exited a_inverse_from_file()')
except: pass
return a_inv
##
# dissertation_pedigree_to_file() takes a pedigree in 'asdxfg' format and writes it to a file.
# @param pedobj A PyPedal pedigree object.
# @retval True (1) on success, false (0) on failure
def dissertation_pedigree_to_file(pedobj):
"""
dissertation_pedigree_to_file() takes a pedigree in 'asdxfg' format and writes it to
a file.
"""
# This procedure assumes that the pedigree passed to it is in 'asdxfg' format.
try: logging.info('Entered dissertation_pedigree_to_file()')
except: pass
try:
length = len(pedobj.pedigree)
#print 'DEBUG: length of pedigree is %s' % (length)
outputfile = '%s%s%s' % (pedobj.kw['filetag'],'_diss','.ped')
print '\t\tWriting dissertation pedigree to %s' % (outputfile)
aout = open(outputfile,'w')
aout.write('# DISSERTATION pedigree produced by PyPedal.\n')
aout.write('% asdbxfg\n')
for l in range(length):
aout.write('%s,%s,%s,%s,%s,%s,%s\n' % (pedobj.pedigree[l].animalID,pedobj.pedigree[l].sireID,pedobj.pedigree[l].damID,pedobj.pedigree[l].by, pedobj.pedigree[l].sex,pedobj.pedigree[l].fa,pedobj.pedigree[l].gen))
aout.close()
_r = 1
except:
_r = 0
try: logging.info('Exited dissertation_pedigree_to_file()')
except: pass
return _r
##
# dissertation_pedigree_to_pedig_format() takes a pedigree in 'asdbxfg' format, formats it into
# the form used by Didier Boichard's 'pedig' suite of programs, and writes it to a file.
# @param pedobj A PyPedal pedigree object.
# @retval True (1) on success, false (0) on failure
def dissertation_pedigree_to_pedig_format(pedobj):
"""
dissertation_pedigree_to_pedig_format() takes a pedigree in 'asdbxfg' format, formats
it into the form used by Didier Boichard's 'pedig' suite of programs, and writes it
to a file.
"""
try: logging.info('Entered dissertation_pedigree_to_pedig_format()')
except: pass
try:
length = len(pedobj.pedigree)
outputfile = '%s%s%s' % (pedobj.kw['filetag'],'_pedig','.ped')
aout = open(outputfile,'w')
for l in range(length):
if pedobj.pedigree[l].sex == 'm' or pedobj.pedigree[l].sex == 'M':
sex = 1
else:
sex = 2
aout.write('%s %s %s %s %s %s %s\n' % (pedobj.pedigree[l].animalID,pedobj.pedigree[l].sireID,pedobj.pedigree[l].damID,pedobj.pedigree[l].by,sex,'1','1'))
aout.close()
_r = 1
except:
_r = 0
try: logging.info('Exited dissertation_pedigree_to_pedig_format()')
except: pass
return _r
##
# dissertation_pedigree_to_pedig_interest_format() takes a pedigree in 'asdbxfg' format,
# formats it into the form used by Didier Boichard's parente program for the studied
# individuals file.
# @param pedobj A PyPedal pedigree object.
# @retval True (1) on success, false (0) on failure
def dissertation_pedigree_to_pedig_interest_format(pedobj):
"""
dissertation_pedigree_to_pedig_interest_format() takes a pedigree in 'asdbxfg' format,
formats it into the form used by Didier Boichard's parente program for the studied
individuals file.
"""
try: logging.info('Entered dissertation_pedigree_to_pedig_interest_format()')
except: pass
try:
length = len(pedobj.pedigree)
outputfile = '%s%s%s' % (pedobj.kw['filetag'],'_parente','.ped')
aout = open(outputfile,'w')
for l in range(length):
aout.write('%s %s\n' % (pedobj.pedigree[l].animalID,'1'))
aout.close()
_r = 1
except:
_r = 0
try: logging.info('Exited dissertation_pedigree_to_pedig_interest_format()')
except: pass
return _r
##
# dissertation_pedigree_to_pedig_format_mask() Takes a pedigree in 'asdbxfg' format,
# formats it into the form used by Didier Boichard's 'pedig' suite of programs, and
# writes it to a file. THIS FUNCTION MASKS THE GENERATION ID WITH A FAKE BIRTH YEAR
# AND WRITES THE FAKE BIRTH YEAR TO THE FILE INSTEAD OF THE TRUE BIRTH YEAR. THIS IS
# AN ATTEMPT TO FOOL PEDIG TO GET f_e, f_a et al. BY GENERATION.
# @param pedobj A PyPedal pedigree object.
# @retval True (1) on success, false (0) on failure
def dissertation_pedigree_to_pedig_format_mask(pedobj):
"""
dissertation_pedigree_to_pedig_format_mask() Takes a pedigree in 'asdbxfg' format,
formats it into the form used by Didier Boichard's 'pedig' suite of programs, and
writes it to a file. THIS FUNCTION MASKS THE GENERATION ID WITH A FAKE BIRTH YEAR
AND WRITES THE FAKE BIRTH YEAR TO THE FILE INSTEAD OF THE TRUE BIRTH YEAR. THIS IS
AN ATTEMPT TO FOOL PEDIG TO GET f_e, f_a et al. BY GENERATION.
"""
try: logging.info('Entered dissertation_pedigree_to_pedig_format_mask()')
except: pass
try:
length = len(pedobj.pedigree)
outputfile = '%s%s%s' % (pedobj.kw['filetag'],'_pedig_mask','.ped')
aout = open(outputfile,'w')
for l in range(length):
## mask generations (yes, this could be shorter - but this is easy to debug)
mygen = float(pedobj.pedigree[l].gen)
if ( mygen > 0 and mygen <= 1.25 ): _gen = 10
elif ( mygen > 1.25 and mygen <= 1.75 ): _gen = 15
elif ( mygen > 1.75 and mygen <= 2.25 ): _gen = 20
elif ( mygen > 2.25 and mygen <= 2.75 ): _gen = 25
elif ( mygen > 2.75 and mygen <= 3.25 ): _gen = 30
elif ( mygen > 3.25 and mygen <= 3.75 ): _gen = 35
elif ( mygen > 3.75 and mygen <= 4.25 ): _gen = 40
elif ( mygen > 4.25 and mygen <= 4.75 ): _gen = 45
elif ( mygen > 4.75 and mygen <= 5.25 ): _gen = 50
elif ( mygen > 5.25 and mygen <= 5.75 ): _gen = 55
elif ( mygen > 5.75 and mygen <= 6.25 ): _gen = 60
elif ( mygen > 6.25 and mygen <= 6.75 ): _gen = 65
elif ( mygen > 6.75 and mygen <= 7.25 ): _gen = 70
elif ( mygen > 7.25 and mygen <= 7.75 ): _gen = 75
else: _gen = 0
_maskgen = 1950 + _gen
## convert sexes
if pedobj.pedigree[l].sex == 'm' or pedobj.pedigree[l].sex == 'M':
sex = 1
else:
sex = 2
aout.write('%s %s %s %s %s %s %s\n' % (pedobj.pedigree[l].animalID,pedobj.pedigree[l].sireID,pedobj.pedigree[l].damID,_maskgen,sex,'1','1'))
aout.close()
_r = 1
except:
_r = 0
try: logging.info('Exited dissertation_pedigree_to_pedig_format_mask()')
except: pass
return _r
##
# pyp_file_header() writes a header to a page of PyPedal output.
# @param ofhandle A Python file handle.
# @param caller A string indicating the name of the calling routine.
# @retval None
def pyp_file_header(ofhandle, caller="Unknown PyPedal routine"):
"""
pyp_file_header() writes a header to a page of PyPedal output.
"""
try:
ofhandle.write('%s\n' % ('-'*80))
ofhandle.write('Created by %s at %s\n' % (caller,pyp_utils.pyp_nice_time()))
ofhandle.write('%s\n' % ('-'*80))
except:
pass
##
# pyp_file_footer() writes a footer to a page of PyPedal output.
# @param ofhandle A Python file handle.
# @param caller A string indicating the name of the calling routine.
# @retval None
def pyp_file_footer(ofhandle, caller="Unknown PyPedal routine"):
"""
pyp_file_footer() writes a footer to a page of PyPedal output.
"""
try:
ofhandle.write('%s\n' % ('-'*80))
except:
pass
##
# renderTitle() renders page titles (produces HTML output by default).
# @param title_string String to be enclosed in HTML "H" tags
# @param title_level Size to be attached to "H" tags, e.g., "H1"
# @retval None
def renderTitle(title_string, title_level="1"):
"""
renderTitle() renders page titles (produces HTML output by default).
"""
# look the output type up in globals(); assigning PYPEDAL_OUTPUT_TYPE here would
# shadow the module-level setting and raise an UnboundLocalError.
output_type = globals().get('PYPEDAL_OUTPUT_TYPE', 'html')
# We are trying to keep it simple here.
if ( not title_level ) or ( title_level < 1 ) or ( title_level > 3 ):
title_level = 1
if output_type == 'html':
renderedTitle = '<H%s>%s</H%s>\n' % (title_level,title_string,title_level)
else:
_underline = '='*len(title_string)
renderedTitle = '%s\n%s\n' % (title_string,_underline)
return renderedTitle
##
# renderBodyText() renders page contents (produces HTML output by default).
# @param text_string String to be rendered with either a trailing newline or enclosed in HTML "P" tags
# @retval None
def renderBodyText(text_string):
"""
renderBodyText() renders page contents (produces HTML output by default).
"""
# look the output type up in globals() for the same reason as in renderTitle().
output_type = globals().get('PYPEDAL_OUTPUT_TYPE', 'html')
if output_type == 'html':
renderedBodyText = '<p>%s</p>' % (text_string)
else:
renderedBodyText = '%s\n' % (text_string)
return renderedBodyText
##
# pickle_pedigree() pickles a pedigree.
# @param pedobj An instance of a PyPedal pedigree object.
# @param filename The name of the file to which the pedigree object should be pickled (optional).
# @retval A 1 on success, a 0 otherwise.
def pickle_pedigree(pedobj, filename=''):
"""
pickle_pedigree() pickles a pedigree.
"""
try: logging.info('Entered pickle_pedigree()')
except: pass
try:
_r = 1
if not filename:
_pfn = '%s.pkl' % ( pedobj.kw['filetag'] )
else:
_pfn = '%s.pkl' % ( filename )
print _pfn
pickle.dump(pedobj,open(_pfn,'w'))
logging.info('Pickled pedigree %s to file %s', pedobj.kw['pedname'], _pfn )
if pedobj.kw['messages'] == 'verbose':
print 'Pickled pedigree %s to file %s' % ( pedobj.kw['pedname'], _pfn )
except:
logging.error('Unable to pickle pedigree %s to file %s', pedobj.kw['pedname'], _pfn )
_r = 0
try: logging.info('Exited pickle_pedigree()')
except: pass
return _r
##
# unpickle_pedigree() reads a pickled pedigree in from a file and returns the unpacked
# pedigree object.
# @param filename The name of the pickle file.
# @retval An instance of a NewPedigree object on success, a 0 otherwise.
def unpickle_pedigree(filename=''):
"""
unpickle_pedigree() reads a pickled pedigree in from a file and returns the unpacked
pedigree object.
"""
try: logging.info('Entered unpickle_pedigree()')
except: pass
try:
if not filename:
logging.error('No filename provided for pedigree unpickling!' )
_r = 0
else:
_ck_pfn = string.split(filename,'.')
if len(_ck_pfn) == 2:
_pfn = filename
else:
_pfn = '%s.pkl' % ( filename )
logging.info('No file extension provided for %s. An extension (.pkl) was added.', filename )
_myped = pickle.load(open(_pfn))
logging.info('Unpickled pedigree %s from file %s', _myped.kw['pedname'], _pfn )
_r = _myped
except:
logging.error('Unable to unpickle pedigree from file!' )
_r = 0
try: logging.info('Exited unpickle_pedigree()')
except: pass
return _r
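# Hedged round-trip sketch (the pedigree object itself is an assumption):
# pickle_pedigree(myped, 'herd2010')      # writes herd2010.pkl
# myped2 = unpickle_pedigree('herd2010')  # the '.pkl' extension is appended
# assert myped2.kw['pedname'] == myped.kw['pedname']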
##
# summary_inbreeding() returns a string representation of the data contained in
# the 'metadata' dictionary contained in the output dictionary returned by
# pyp_nrm/pyp_inbreeding().
# @param f_metadata Dictionary of inbreeding metadata.
# @retval A string on success, a 0 otherwise.
def summary_inbreeding(f_metadata):
"""
summary_inbreeding() returns a string representation of the data contained in
the 'metadata' dictionary contained in the output dictionary returned by
pyp_nrm/pyp_inbreeding().
"""
try:
_summary = ''
_summary = '%s' % (LINE1)
_summary = '%s\n%s' % (_summary, 'Inbreeding Statistics')
_summary = '\n%s\n%s' % (_summary, LINE1)
_summary = '%s\n%s' % (_summary, 'All animals:')
_summary = '\n%s\n%s' % (_summary, LINE2)
for k,v in f_metadata['all'].iteritems():
_line = '\t%s\t%s' % (k,v)
_summary = '%s\n%s' % (_summary, _line)
_summary = '\n%s\n%s' % (_summary, LINE1)
_summary = '%s\n%s' % (_summary, 'Animals with non-zero CoI:')
_summary = '\n%s\n%s' % (_summary, LINE2)
for k,v in f_metadata['nonzero'].iteritems():
_line = '\t%s\t%s' % (k,v)
_summary = '%s\n%s' % (_summary, _line)
_summary = '\n%s\n%s' % (_summary, LINE1)
return _summary
except:
return '0'
##
# save_ijk() saves an NRM to a file in the form "animal A" "animal B" "rAB".
# @param pedobj: The pedigree to which the NRM is attached
# @param nrm_filename: The file to which the matrix should be written.
# @retval A save status indicator (0: failed, 1: success).
def save_ijk(pedobj, nrm_filename):
"""
save_ijk() saves an NRM to a file in the form "animal A" "animal B" "rAB".
"""
if pedobj.kw['messages'] == 'verbose':
print '[INFO]: Saving A-matrix to file %s at %s.' % ( nrm_filename, pyp_utils.pyp_nice_time() )
logging.info('Saving A-matrix to file %s', nrm_filename)
#try:
of = file(nrm_filename,"w")
for i in range(pedobj.metadata.num_records):
for j in range(i,pedobj.metadata.num_records):
line = '%s %s %s\n' % ( pedobj.backmap[i+1], pedobj.backmap[j+1], pedobj.nrm.nrm[i,j])
of.write(line)
of.close()
if pedobj.kw['messages'] == 'verbose':
print '[INFO]: A-matrix successfully saved to file %s at %s.' % ( nrm_filename, pyp_utils.pyp_nice_time() )
logging.info('A-matrix successfully saved to file %s', nrm_filename)
return 1
##
# load_from_gedcom() reads and parses pedigree data that conform to
# a subset of the GEDCOM 5.5 specification. Not all valid GEDCOM
# tags are supported; unsupported tags are ignored.
# @param infilename The GEDCOM file to read the pedigree from.
# @param messages Controls output to the screen
# @param standalone Uses logging if called by a NewPedigree method
# @param missing_sex Value assigned to an animal with unknown sex
# @param missing_parent Value assigned to unknown parents
# @param missing_name Name assigned by default
# @param missing_byear Value assigned to unknown birth years
# @param debug Flag turning debugging messages on (1) and off (0)
# @retval A save status indicator (0: failed, 1: success).
def load_from_gedcom(infilename, messages='verbose', standalone=1, missing_sex='u', \
missing_parent=0, missing_name='Unknown Name', missing_byear='0001', debug=0):
"""
load_from_gedcom() reads and parses pedigree data that conform to
a subset of the GEDCOM 5.5 specification. Not all valid GEDCOM
tags are supported; unsupported tags are ignored.
"""
# NOTE: There is a lot of error-checking that could be done here but isn't. That
# behavior is a deliberate design decision. load_from_gedcom() is NOT intended to
# be a general-purpose GEDCOM parser, but rather is supposed to convert a GEDCOM
# file into a pedigree format that PyPedal can load into a NewPedigree object with
# no trouble. The error-checking and related operations, such as automatically adding
# pedigree entries for parents with no records themselves, are left to the preprocess()
# and load() methods of the NewPedigree and NewAnimal classes.
#
# If someone wants to take this function and extend it to be more general that's fine,
# but I ask that they create a new function that extends this function rather than
# altering the no-doubt imperfect behavior of load_from_gedcom().
known_tags = ['BIRT','CHIL','DATE','FAM','FAMC','FAMS','HUSB', \
'INDI','NAME','SEX','WIFE']
current_level, any_names, any_sexes, any_birth, get_birth = 0, 0, 0, 0, 0
indi, fam = {}, {}
# Structures for mapping data
fam2husb, fam2wife, fam2chil, indi2name, indi2sex, indi2famc, indi2fams, indi2birth = {}, {}, {}, {}, {}, {}, {}, {}
# Read the file
if standalone == 0: logging.info('[load_from_gedcom]: Opening GEDCOM pedigree file %s.',infilename)
infile = open(infilename,'r')
inlines = infile.readlines()
infile.close()
all_done = 0 # Have we processed all of the input lines?
next_zero = 0 # Have we seen the next zero?
zero_mark = 0 # If so, where?
last_round = 0
try:
while not all_done:
next_zero = 0
_curr_rec = []
for l in xrange(zero_mark,len(inlines)+1):
# I don't think that all GEDCOM 5.5 0-level records contain three tokens, but
# the INDI and FAM records do, and those are the records we want.
try:
linelist = inlines[l].strip().split(' ')
if len(linelist) > 1:
if linelist[0] == '0' and l > zero_mark:
next_zero = 1
zero_mark = l
break
else:
_curr_rec.append(inlines[l])
except IndexError:
all_done = 1
if messages == 'debug':
print '-'*80
print _curr_rec
print '-'*80
firstlinelist = _curr_rec[0].strip().rstrip('\r').rstrip('\n').split(' ')
if firstlinelist[-1] == 'INDI':
_tag, _name, _sex, _famc, _fams, _byr = 0, 0, 0, 0, [], 0
any_sexes, any_birth, get_birth = 0, 0, 0
for c in xrange(1,len(_curr_rec)):
linelist = _curr_rec[c].strip().rstrip('\r').rstrip('\n').split(' ')
if c == 1:
_indi = _curr_rec[0].strip().split(' ')[1][1:-1]
if len(linelist) > 1:
_tag = linelist[1].upper()
else: break
if _tag in known_tags:
if messages == 'debug':
print 'Processing tag %s' % ( _tag )
if _tag.upper() == 'NAME':
_name = ' '.join(linelist[2:])
elif _tag.upper() == 'SEX':
_sex = linelist[2]
any_sexes = 1
elif _tag.upper() == 'FAMC':
_famc = linelist[2][1:-1]
elif _tag.upper() == 'FAMS':
_fams.append(linelist[2][1:-1])
# The only dates handled right now are birth dates
elif _tag.upper() == 'BIRT':
get_birth = 1
elif _tag.upper() == 'DATE' and get_birth:
_byr = ' '.join(linelist[2:])[-4:]
get_birth = 0
any_birth = 1
else: pass
else:
if messages == 'debug':
print 'Skipping unknown tag %s' % ( _tag )
# Once we've read the entire record we can process the data.
# Here's the basic idea: put the details from the INDI and
# FAM records into dictionaries (lookup tables) as we sweep
# the input data. Once we've done that we'll assemble the
# complete animal records from the pieces in the relevant
# lookup tables.
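                    # For example (hypothetical IDs), the GEDCOM fragment
                    #   0 @I1@ INDI
                    #   1 SEX M
                    #   1 FAMS @F1@
                    #   0 @F1@ FAM
                    #   1 HUSB @I1@
                    #   1 CHIL @I2@
                    # produces indi2sex['I1'] = 'M', indi2fams['I1'] = ['F1'],
                    # fam2husb['F1'] = 'I1', and fam2chil['F1'] = {'I2': 'I2'}.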
if c == len(_curr_rec)-1:
if not any_sexes:
_sex = missing_sex.upper()
indi2sex[_indi] = _sex
if _byr:
indi2birth[_indi] = _byr
else:
indi2birth[_indi] = missing_byear
if _famc:
indi2famc[_indi] = _famc
if _fams:
indi2fams[_indi] = _fams
                        if _name:
                            if not any_names: any_names = 1
                            indi2name[_indi] = _name
                        else:
                            indi2name[_indi] = missing_name
elif firstlinelist[-1] == 'FAM':
_husb, _wife,_child = 0, 0, []
end_family = 0
for c in xrange(1,len(_curr_rec)):
linelist = _curr_rec[c].strip().rstrip('\r').rstrip('\n').split(' ')
if c == 1:
_fam = _curr_rec[0].strip().split(' ')[1][1:-1]
if len(linelist) > 1:
_tag = linelist[1].upper()
else: break
if _tag in known_tags:
if messages == 'debug':
print 'Processing tag %s' % ( _tag )
if _tag.upper() == 'HUSB':
_husb = linelist[2][1:-1]
elif _tag.upper() == 'WIFE':
_wife = linelist[2][1:-1]
elif _tag.upper() == 'CHIL':
_child.append(linelist[2][1:-1])
else:
if messages == 'debug':
print 'Skipping unknown tag %s' % ( _tag )
if c == len(_curr_rec)-1:
if _husb:
fam2husb[_fam] = _husb
if _wife:
fam2wife[_fam] = _wife
                        if _child:
                            fam2chil[_fam] = {}
                            for _ch in _child:
                                fam2chil[_fam][_ch] = _ch
if debug:
print 'indi2sex: ', indi2sex
print 'indi2birth: ', indi2birth
print 'indi2name: ', indi2name
print 'indi2famc: ', indi2famc
print 'indi2fams: ', indi2fams
print 'fam2husb: ', fam2husb
print 'fam2wife: ', fam2wife
# Now we walk through the INDI records and assemble everything. Note
# that there is no error-checking here. I don't want to have error-
# checking code in two different places, namely here and in the
# NewPedigree::preprocess() method. Therefore, all error-checking is
# deferred to the class method.
assembled = {}
for i in indi2sex.keys():
assembled[i] = {}
assembled[i]['indi'] = i
assembled[i]['sex'] = indi2sex[i]
assembled[i]['birth'] = indi2birth[i]
assembled[i]['name'] = indi2name[i]
try:
assembled[i]['sire'] = fam2husb[indi2famc[i]]
except KeyError:
assembled[i]['sire'] = missing_parent
try:
assembled[i]['dam'] = fam2wife[indi2famc[i]]
except KeyError:
assembled[i]['dam'] = missing_parent
if debug:
print 'assembled: ', assembled
if messages == 'verbose':
print '[INFO]: Successfully imported pedigree from the GEDCOM file %s!' \
% ( infilename )
logging.info('Successfully imported pedigree from the GEDCOM file %s!',infilename)
except:
if messages == 'verbose':
print '[ERROR]: Unable to import pedigree from the GEDCOM file %s!' \
% ( infilename )
logging.error('Unable to import pedigree from the GEDCOM file %s!',infilename)
    # Save the GEDCOM file in ASD format and update
    # the value of pedfile.
try:
outfilename = '%s.tmp' % ( infilename )
pedformat = save_from_gedcom(outfilename,assembled)
except:
pedformat = 'xxxx'
return pedformat
##
# save_from_gedcom() takes pedigree data parsed by load_from_gedcom() and
# writes it to a text file in an ASD format that PyPedal can easily read.
# @param outfilename The file to which the records should be written.
# @param assembled A dictionary of records read from a GEDCOM input file
# @retval A string containing the pedigree format code. 'xxxx' if there was a problem.
def save_from_gedcom(outfilename, assembled):
"""
save_from_gedcom() takes pedigree data parsed by load_from_gedcom() and
writes it to a text file in an ASD format that PyPedal can easily read.
"""
pedformat = 'xxxx'
try:
ofh = file(outfilename,'w')
for _i in assembled.keys():
pedformat = 'ASDxbu'
outstring = '%s,%s,%s,%s,%s,%s\n' % ( \
assembled[_i]['indi'], \
assembled[_i]['sire'], \
assembled[_i]['dam'], \
assembled[_i]['sex'], \
assembled[_i]['birth'], \
assembled[_i]['name'], \
)
ofh.write(outstring)
ofh.close()
logging.info('Saved GEDCOM pedigree to the file %s!',outfilename)
except:
logging.error('Unable to save GEDCOM pedigree to the file %s!',outfilename)
return pedformat
##
# save_to_gedcom() writes a PyPedal NewPedigree object to a file in
# GEDCOM 5.5 format.
# @param pedobj An instance of a PyPedal NewPedigree object
# @param outfilename The file to which the pedigree should be written
# @retval A save status indicator (0: failed, 1: success).
def save_to_gedcom(pedobj, outfilename):
"""
save_to_gedcom() writes a PyPedal NewPedigree object to a file in
GEDCOM 5.5 format.
"""
try:
ofh = file(outfilename,'w')
# Write file header
ofh.write('0 HEAD\n')
ofh.write('1 SOUR PYPEDAL\n')
ofh.write('2 VERS V2.0\n')
ofh.write('2 CORP USDA-ARS-BA-ANRI-AIPL\n')
ofh.write('1 DEST PYPEDAL\n')
ofh.write('1 DATE %s\n' % (time.strftime('%m %d %Y', \
(time.localtime(time.time())))) )
ofh.write('1 FILE %s\n' % (pedobj.kw['pedfile']) )
ofh.write('1 GEDC\n')
ofh.write('2 VERS 5.5\n')
ofh.write('2 FORM Lineage-Linked\n')
ofh.write('1 CHAR ASCII\n')
# Fill the file
indi = {}
fam = {}
par2spouses = {}
        # Sweep the pedigree once to find all non-founders and map the founders
# to families so that we can correctly assign FAMS tags to founder records.
for p in pedobj.pedigree:
if p.sireID != pedobj.kw['missing_parent'] or p.damID != pedobj.kw['missing_parent']:
if not par2spouses.has_key(p.sireID):
par2spouses[p.sireID] = []
if not par2spouses.has_key(p.damID):
par2spouses[p.damID] = []
if p.sireName == pedobj.kw['missing_name'] and p.damName == pedobj.kw['missing_name']:
_spouses = 'F0_0'
if p.sireID != pedobj.kw['missing_parent']:
if _spouses not in par2spouses[p.sireID]:
par2spouses[p.sireID].append(_spouses)
if p.damID != pedobj.kw['missing_parent']:
if _spouses not in par2spouses[p.damID]:
par2spouses[p.damID].append(_spouses)
elif p.sireName == pedobj.kw['missing_name']:
_spouses = 'F%s' % ( p.damName )
if p.damID != pedobj.kw['missing_parent']:
if _spouses not in par2spouses[p.damID]:
par2spouses[p.damID].append(_spouses)
elif p.damName == pedobj.kw['missing_name']:
_spouses = 'F%s' % ( p.sireName )
if p.sireID != pedobj.kw['missing_parent']:
if _spouses not in par2spouses[p.sireID]:
par2spouses[p.sireID].append(_spouses)
else:
_spouses = 'F%s_%s' % ( p.sireName, p.damName )
if p.sireID != pedobj.kw['missing_parent']:
if _spouses not in par2spouses[p.sireID]:
par2spouses[p.sireID].append(_spouses)
if p.damID != pedobj.kw['missing_parent']:
if _spouses not in par2spouses[p.damID]:
par2spouses[p.damID].append(_spouses)
# Create INDI and FAM records for each animal in the pedigree.
for p in pedobj.pedigree:
if p.sireName == pedobj.kw['missing_name'] and p.damName == pedobj.kw['missing_name']:
_fam = 'F0_0'
elif p.sireName == pedobj.kw['missing_name']:
_fam = 'F%s' % ( p.damName )
elif p.damName == pedobj.kw['missing_name']:
_fam = 'F%s' % ( p.sireName )
else:
_fam = 'F%s_%s' % ( p.sireName, p.damName )
if not indi.has_key(p.animalID):
indi[p.animalID] = '0 @%s@ INDI\n' % \
( pedobj.namebackmap[pedobj.backmap[p.animalID]] )
indi[p.animalID] = '%s1 SEX %s\n' % ( indi[p.animalID], p.sex.upper() )
if 'n' in pedobj.kw['pedformat']:
indi[p.animalID] = '%s1 NAME %s\n' % ( indi[p.animalID], p.name )
elif 'u' in pedobj.kw['pedformat']:
indi[p.animalID] = '%s1 NAME %s\n' % ( indi[p.animalID], p.userField )
else:
pass
if 'y' in pedobj.kw['pedformat'] and p.by != pedobj.kw['missing_byear']:
indi[p.animalID] = '%s1 BIRT\n' % ( indi[p.animalID] )
indi[p.animalID] = '%s2 DATE %s\n' % ( indi[p.animalID], p.by )
if 'b' in pedobj.kw['pedformat'] and ( p.bd != pedobj.kw['missing_bdate'] and p.bd != str(pedobj.kw['missing_byear']) ):
indi[p.animalID] = '%s1 BIRT\n' % ( indi[p.animalID] )
indi[p.animalID] = '%s2 DATE %s\n' % ( indi[p.animalID], p.bd )
if _fam != 'F0_0':
indi[p.animalID] = '%s1 FAMC @%s@\n' % ( indi[p.animalID], _fam )
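            # At this point an assembled INDI entry is a multi-line GEDCOM string
            # along these lines (hypothetical animal Fido with sire Rex and dam Fanny):
            #   0 @Fido@ INDI
            #   1 SEX M
            #   1 NAME Fido
            #   1 BIRT
            #   2 DATE 1998
            #   1 FAMC @FRex_Fanny@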
            if par2spouses.has_key(p.animalID):
                for _p2s in par2spouses[p.animalID]:
                    if _p2s != 'F0_0':
                        # _p2s already carries the leading 'F', so do not add a second one.
                        indi[p.animalID] = '%s1 FAMS @%s@\n' % ( indi[p.animalID], _p2s )
            # Create the family if it does not yet exist; F0_0 (both parents
            # unknown) never gets a FAM record.
            if not fam.has_key(_fam) and _fam != 'F0_0':
                fam[_fam] = '0 @%s@ FAM\n' % ( _fam )
            if fam.has_key(_fam):
                if 'HUSB' not in fam[_fam] and p.sireName != pedobj.kw['missing_name']:
                    fam[_fam] = '%s1 HUSB @%s@\n' % ( fam[_fam],p.sireName )
                if 'WIFE' not in fam[_fam] and p.damName != pedobj.kw['missing_name']:
                    fam[_fam] = '%s1 WIFE @%s@\n' % ( fam[_fam], p.damName )
                fam[_fam] = '%s1 CHIL @%s@\n' % ( fam[_fam], \
                    pedobj.namebackmap[pedobj.backmap[p.animalID]] )
#print fam[_fam]
# Now loop and write the contents of indi and fam to the file.
for i in indi.values():
ofh.write(i)
for f in fam.values():
ofh.write(f)
# Write footer and close file
ofh.write('0 TRLR\n')
ofh.close()
if pedobj.kw['messages'] == 'verbose':
print '[INFO]: Successfully exported pedigree to the GEDCOM file %s!' \
% ( outfilename )
logging.info('Successfully exported pedigree to the GEDCOM file %s!',outfilename)
return 1
except:
if pedobj.kw['messages'] == 'verbose':
print '[ERROR]: Unable to save pedigree to the GEDCOM file %s!' \
% ( outfilename )
logging.error('Unable to export pedigree to the GEDCOM file %s!',outfilename)
return 0
##
# load_from_genes() reads and parses pedigree data that conforms to
# the DBF format used by GENES software for pedigree management v1.2
# (R. Lacey, http://www.vortex9.org/genes.html). When possible data
# are mapped into similar PyPedal fields.
# @param infilename The file from which the pedigree should be read.
# @param messages Controls output to the screen
# @param standalone Uses logging if called by a NewPedigree method
# @param missing_sex Value assigned to an animal with unknown sex
# @param missing_parent Value assigned to unknown parents
# @param missing_name Name assigned by default
# @param missing_bdate Value assigned to unknown birthdates
# @param debug Flag turning debugging messages on (1) and off (0)
# @retval A string containing the pedigree format code; 'xxxx' if there was a problem.
def load_from_genes(infilename, messages='verbose', standalone=1, missing_sex='u', \
missing_parent=0, missing_name='Unknown Name', missing_bdate='01011900', debug=0):
"""
load_from_genes() reads and parses pedigree data from the dBase III
files used by GENES 1.20 (http://www.vortex9.org/genes.html).
"""
import struct, datetime, decimal, itertools
record_list = []
f = open(infilename, 'rb')
    ### Begin code by Raymond Hettinger, taken from http://code.activestate.com/recipes/362715/
### and modified slightly for PyPedal.
# fields is list of field names and field specs: (type, size, decimal places).
# record_list contains data records.
# If a record is marked as deleted, it is skipped.
#
# File should be opened for binary reads.
numrec, lenheader = struct.unpack('<xxxxLH22x', f.read(32))
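    # '<xxxxLH22x' covers the 32-byte dBase header: skip 4 bytes (version and
    # last-update date), read the record count (unsigned long) and the header
    # length (unsigned short), then skip the remaining 22 reserved bytes.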
numfields = (lenheader - 33) // 32
fields = []
for fieldno in xrange(numfields):
name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
name = name.replace('\0', '') # eliminate NULs from string
fields.append((name, typ, size, deci))
#yield [field[0] for field in fields]
#print fields
#if debug:
# print [field[0] for field in fields]
#yield [tuple(field[1:]) for field in fields]
#if debug:
# print [tuple(field[1:]) for field in fields]
terminator = f.read(1)
assert terminator == '\r'
fields.insert(0, ('DeletionFlag', 'C', 1, 0))
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in fields])
fmtsiz = struct.calcsize(fmt)
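    # fmt concatenates one '%ds' per field, so each record unpacks into a tuple
    # of fixed-width strings; e.g. fields of sizes 1, 6 and 6 give fmt = '1s6s6s'
    # and fmtsiz = 13.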
for i in xrange(numrec):
record = struct.unpack(fmt, f.read(fmtsiz))
if record[0] != ' ':
continue # deleted record
result = []
for (name, typ, size, deci), value in itertools.izip(fields, record):
if name == 'DeletionFlag':
continue
if typ == "N":
value = value.replace('\0', '').lstrip()
if value == '':
value = 0
elif deci:
                    value = value.replace('\x1a', '').lstrip() # Strip ctrl-Z line ending
value = decimal.Decimal(value)
value = float(value)
else:
value = int(value)
elif typ == 'D':
y, m, d = value[:4], value[4:6], value[6:8]
value=d+"/"+m+"/"+y
elif typ == 'L':
value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
elif typ == 'F':
value = float(value)
result.append(value)
record_list.append(result)
    ### End code by Raymond Hettinger, taken from http://code.activestate.com/recipes/362715/
f.close()
# Check inputs visually
#if debug:
# for r in record_list:
# print r
# print [field[0] for field in fields]
# Here we go...
field_list = [field[0] for field in fields]
assembled = {}
# As is also the case for GEDCOM record processing, no consistency checks are performed in this
    # subroutine. The only "edits" made are to ensure that values, e.g. sex codes, are consistent with
# the values expected by PyPedal.
for r in record_list:
anidx = field_list.index('STUD_ID') - 1 # The subtraction deals with the DeletionFlag
assembled[r[anidx]] = {}
assembled[r[anidx]]['indi'] = r[anidx].strip()
# Unlike GENES, PyPedal does not distinguish among animals that are known founders and those that
# have unknown parents but are not founders.
assembled[r[anidx]]['sire'] = r[field_list.index('SIRE_ID') - 1].strip() # " UNK" = unknown (" WILD" = founder)
assembled[r[anidx]]['dam'] = r[field_list.index('DAM_ID') - 1].strip() # " UNK" = unknown (" WILD" = founder)
if assembled[r[anidx]]['sire'].strip() == 'UNK': assembled[r[anidx]]['sire'] = missing_parent
if assembled[r[anidx]]['sire'].strip() == 'WILD': assembled[r[anidx]]['sire'] = missing_parent
if assembled[r[anidx]]['dam'].strip() == 'UNK': assembled[r[anidx]]['dam'] = missing_parent
if assembled[r[anidx]]['dam'].strip() == 'WILD': assembled[r[anidx]]['dam'] = missing_parent
assembled[r[anidx]]['name'] = r[anidx].strip()
# Sex codes are different
assembled[r[anidx]]['sex'] = r[field_list.index('SEX') - 1] # 0 = female, 1 = male
if str(assembled[r[anidx]]['sex']) == '0': assembled[r[anidx]]['sex'] = 'f'
elif str(assembled[r[anidx]]['sex']) == '1': assembled[r[anidx]]['sex'] = 'm'
else: assembled[r[anidx]]['sex'] = missing_sex
# Flip alive/dead status
assembled[r[anidx]]['alive'] = r[field_list.index('DEAD') - 1] # T/F indicating whether the animal is dead (T = dead)
if assembled[r[anidx]]['alive'] == 'T': assembled[r[anidx]]['alive'] = '1'
else: assembled[r[anidx]]['alive'] = '0'
# Inbreeding
assembled[r[anidx]]['fa'] = r[field_list.index('INBREED') - 1]
        # Birthdates can be missing; check before splitting to avoid an IndexError
        # on empty values.
        assembled[r[anidx]]['bd'] = r[field_list.index('BDATE') - 1]
        if assembled[r[anidx]]['bd'].replace('/', '').strip() == '':
            assembled[r[anidx]]['bd'] = missing_bdate
        else:
            pieces = assembled[r[anidx]]['bd'].split("/")
            assembled[r[anidx]]['bd'] = '%s%s%s' % ( pieces[0], pieces[1], pieces[2] )
# Use herd as a proxy for location
assembled[r[anidx]]['herd'] = r[field_list.index('LOCATION') - 1].strip()
# Age should be okay as-is
assembled[r[anidx]]['age'] = r[field_list.index('AGE') - 1]
#if debug:
# print assembled[r[anidx]]
if messages == 'verbose':
print '[INFO]: Successfully imported pedigree from the GENES 1.20 file %s!' \
% ( infilename )
logging.info('Successfully imported pedigree from the GENES 1.20 file %s!',infilename)
    # Save the GENES file in ASD format and update
    # the value of pedfile.
try:
outfilename = '%s.tmp' % ( infilename )
pedformat = save_from_genes(outfilename,assembled)
except:
pedformat = 'xxxx'
return pedformat
##
# save_from_genes() takes pedigree data parsed by load_from_genes() and
# writes it to a text file in an ASD format that PyPedal can easily read.
# @param outfilename The file to which the records should be written.
# @param assembled A dictionary of records read from a GENES 1.20 input file
# @retval A string containing the pedigree format code. 'xxxx' if there was a problem.
def save_from_genes(outfilename, assembled):
"""
save_from_genes() takes pedigree data parsed by load_from_genes() and
writes it to a text file in an ASD format that PyPedal can easily read.
"""
pedformat = 'xxxx'
try:
ofh = file(outfilename,'w')
for _i in assembled.keys():
pedformat = 'ASDefHlnxy'
outstring = '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % ( \
assembled[_i]['indi'], \
assembled[_i]['sire'], \
assembled[_i]['dam'], \
assembled[_i]['age'], \
assembled[_i]['fa'], \
assembled[_i]['herd'], \
assembled[_i]['alive'], \
assembled[_i]['name'], \
assembled[_i]['sex'], \
assembled[_i]['bd'], \
)
ofh.write(outstring)
ofh.close()
logging.info('Saved GENES 1.20 pedigree to the file %s!',outfilename)
except:
logging.error('Unable to save GENES 1.20 pedigree to the file %s!',outfilename)
return pedformat
##
# save_to_genes() writes a PyPedal NewPedigree object to a file in
# GENES 1.20 (dBase III) format.
# @param pedobj An instance of a PyPedal NewPedigree object
# @param outfilename The file to which the pedigree should be written
# @retval A save status indicator (0: failed, 1: success).
def save_to_genes(pedobj, outfilename):
"""
save_to_genes() writes a PyPedal NewPedigree object to a file in
GENES 1.20 (dBase III) format.
"""
import struct, datetime, decimal, itertools
# These are the field names and data-types per the specification in Lacy's GENES.DOC.
fields = [ ('STUD_ID', 'C', 6, 0), ('DAM_ID', 'C', 6, 0), ('SIRE_ID', 'C', 6, 0), ('BDATE', 'D', 8, 0),
('SEX', 'N', 1, 0), ('DATEOUT', 'D', 8, 0), ('DEATHDATE', 'D', 8, 0), ('LOCATION', 'C', 9, 0),
('SELECTED', 'L', 1, 0), ('DEAD', 'L', 1, 0), ('INBREED', 'N', 8, 4), ('AGE', 'N', 3, 0),
('KNOWN', 'N', 8, 4), ('INBREED_KN', 'N', 8, 4), ('MK', 'N', 8, 4), ('MK_KN', 'N', 8, 4),
('KV', 'N', 8, 4), ('KV_KN', 'N', 8, 4), ('VX', 'N', 8, 4), ('GU_ALL', 'N', 8, 4),
('GU_DESC', 'N', 8, 4), ('PR_LOST', 'N', 8, 4) ]
    # Dictionary to map GENES fields to PyPedal NewAnimal attributes. The
    # original dict listed STUD_ID twice ('originalID' and 'animalID'); only
    # one mapping can survive in a dict literal, so 'animalID' is kept here.
    pyp2genes = { 'STUD_ID': 'animalID', 'SIRE_ID': 'sireName', 'DAM_ID': 'damName', 'BDATE': 'bd',
                  'SEX': 'sex', 'LOCATION': 'originalHerd', 'DEAD': 'alive', 'INBREED': 'fa', 'AGE': 'age'
                }
# Go ahead and try exporting the pedigree.
try:
        ### Begin code by Raymond Hettinger, taken from http://code.activestate.com/recipes/362715/
### and modified slightly for PyPedal.
f = open(outfilename, 'wb')
# Write header info
ver = 3
now = datetime.datetime.now()
yr, mon, day = now.year-1900, now.month, now.day
numrec = len(pedobj.pedigree)
numfields = len(fields)
lenheader = numfields * 32 + 33
lenrecord = sum(field[2] for field in fields) + 1
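        # '<BBBBLHH20x' is the 32-byte dBase III header: version byte, three
        # last-update bytes (year-1900, month, day), record count (L), header
        # length (H), record length (H), and 20 reserved/padding bytes.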
hdr = struct.pack('<BBBBLHH20x', ver, yr, mon, day, numrec, lenheader, lenrecord)
f.write(hdr)
# Write field specs
for fld in fields:
(name, typ, size, deci) = fld
name = name.ljust(11, '\x00')
fld = struct.pack('<11sc4xBB14x', name, typ, size, deci)
f.write(fld)
# terminator
f.write('\r')
        # Write individual records
for p in pedobj.pedigree:
f.write(' ') # deletion flag
for fld in fields:
(name, typ, size, deci) = fld
# Assign values or blanks, depending on whether or not PyPedal and GENES information
# are correspondent.
if pyp2genes.has_key(name):
value = getattr(p, pyp2genes[name])
else:
value = ' '
# We have to do a little re-mapping to deal with, e.g., sex codes.
if value == pedobj.kw['missing_name']: value = ' UNK'
if name == 'SEX':
if value == 'f': value = 0
elif value == 'm': value = 1
else: value = 0 # Assume that unknown sex animals are females
if name == 'DEAD':
if value == 0: value = 'T'
else: value = 'F'
# Now cast everything and pad out fields to the correct length.
if typ == 'N':
value = str(value).rjust(size, ' ')
elif typ == 'D':
if value == ' ':
value = ' '*size
else:
value = '%s%s' % ( value[4:8], value[0:4] )
elif typ == 'L':
value = str(value)[0].upper()
else:
                    # If we have values that exceed the width of the field truncate them and warn the user.
                    if len(str(value)) > size:
                        value = str(value)[0:size]
                        if pedobj.kw['messages'] == 'verbose':
                            print '[WARNING]: Truncated field %s while exporting to GENES 1.20 file %s!' % ( name, outfilename )
                        logging.warn('Truncated field %s while exporting to GENES 1.20 file %s!', name, outfilename)
value = str(value)[:size].ljust(size, ' ')
# Double-check that lengths are correct before writing the record
assert len(value) == size
f.write(value)
# End-of-file marker
f.write('\x1A')
        ### End code by Raymond Hettinger, taken from http://code.activestate.com/recipes/362715/
### and modified slightly for PyPedal.
f.close()
if pedobj.kw['messages'] == 'verbose':
print '[INFO]: Successfully exported pedigree to the GENES file %s!' \
% ( outfilename )
logging.info('Successfully exported pedigree to the GENES file %s!',outfilename)
return 1
# If the pedigree could not be exported then tell the user that something went wrong.
except:
if pedobj.kw['messages'] == 'verbose':
print '[ERROR]: Unable to export pedigree to the GENES file %s!' \
% ( outfilename )
logging.error('Unable to export pedigree to the GENES file %s!',outfilename)
return 0
##
# save_newanimals_to_file() takes a list of PyPedal NewAnimal objects as input and writes them to a pedigree file.
# @param animal_list A list of PyPedal NewAnimal objects
# @param filename The name of the file to which the animals should be written
# @param pedformat Pedigree format code for the output file
# @param sepchar Separator placed between fields in each output line
def save_newanimals_to_file(animal_list, filename, pedformat, sepchar):
if len(animal_list) == 0:
pass
else:
# First, save the unique animals from the union of pedigrees a and
# b based on the match rule. Note that the pedformat from the first
# pedigree passed to __add__() will be used for both pedigrees. This
# makes sense because you cannot have two different pedformats in
# the same file.
        try:
            f = open(filename, 'w')
            for animal in animal_list:
                _outstring = ''
                for pf in pedformat:
                    # NOTE: this block appears to have been adapted from a
                    # NewPedigree method -- 'originalID', 'self.new_animal_attr',
                    # 'self.kw', and 'self.pedigree' are unresolved at module
                    # level and must be supplied by the caller's pedigree context.
                    if originalID == False:
                        value = getattr(animal, self.new_animal_attr[pf])
                    else:
                        if pf in ['a','A']:
                            value = animal.originalID
                        # This cascade may break if the pedigree is not
                        # renumbered...
                        elif pf in ['s','S']:
                            if animal.sireID != self.kw['missing_parent']:
                                value = self.pedigree[animal.sireID-1].originalID
                            else:
                                value = 0
                        elif pf in ['d','D']:
                            if animal.damID != self.kw['missing_parent']:
                                value = self.pedigree[animal.damID-1].originalID
                            else:
                                value = 0
                        else:
                            value = getattr(animal, self.new_animal_attr[pf])
                    # If we don't catch the special case of the first entry
                    # in an output line then a sepchar always will be the
                    # first character in the line.
                    if len(_outstring) > 0:
                        _outstring = '%s%s%s' % ( _outstring, sepchar, value )
                    else:
                        _outstring = '%s' % ( value )
                f.write( '%s\n' % (_outstring) )
            f.close()
        except:
            pass
|
wintermind/pypedal
|
PyPedal/pyp_io.py
|
Python
|
gpl-2.0
| 50,964
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Apr 8 21:37:34 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x01\x3b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x04\x08\x1a\x33\x54\xb0\xff\xb8\x00\x00\x00\xc8\x49\x44\
\x41\x54\x58\xc3\xed\x97\xbb\x0d\xc2\x30\x14\x45\x8f\x4d\x10\x35\
\x1b\x44\x82\x09\x32\x02\x14\xf4\x08\x18\xe1\x2d\x40\x17\xea\x88\
\x8a\x05\x3c\x02\x88\x05\x28\xb2\x01\x1b\x40\x91\x31\x28\x50\x68\
\x8c\xe4\xb8\x77\xdc\xbc\xdb\x3d\xcb\xd2\x3d\xfe\x34\xc7\x00\x88\
\xc8\x0e\x38\x02\x4b\xc0\x92\x36\x3d\xf0\x06\x2e\xce\xb9\xab\x11\
\x91\x2d\x70\x27\x4f\xf6\x16\x38\x91\x2f\x75\x01\x94\xc1\xc2\x13\
\x68\x81\x49\xa2\xc2\x2f\xb0\x06\x2a\x3f\x97\x05\x60\x82\x0d\x0f\
\xe7\x5c\x9d\xf2\xc8\x22\x72\x0e\x00\x7a\xeb\x3f\xc5\x3f\xb3\x11\
\xae\x7d\xd0\x61\xc9\x1c\x05\x50\x00\x05\x50\x00\x05\x50\x00\x05\
\x50\x00\x05\x50\x80\x18\xe0\x33\x42\xe7\xa0\x23\xf6\x82\x8d\x88\
\x4c\x13\x8b\xc9\x2a\x06\xe8\x80\xb9\x9f\xab\x40\x1a\xc6\x48\x67\
\x81\x26\xe3\x17\x68\x8c\xd7\xa5\x83\xd7\xf3\x45\xf4\x24\xa9\xf4\
\xfc\xe5\xf5\xfc\xf6\x03\x48\xbf\x23\xef\x69\x00\xae\x23\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xe1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x04\x00\x00\x00\xd9\x73\xb2\x7f\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x44\x24\x00\x00\x44\x24\x01\x40\x67\xc4\x54\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xdf\x04\x08\x15\x17\x0b\x97\x34\
\xb6\xf0\x00\x00\x01\xaf\x49\x44\x41\x54\x48\xc7\xed\xd4\xb1\x6a\
\x93\x51\x14\xc0\xf1\x9f\x86\x66\xd2\x76\xb0\x3a\x7c\xa4\x20\x24\
\x92\x60\xc7\x40\x97\x76\x77\x10\x5a\xe8\x4b\x74\x48\xa5\x8f\x20\
\x74\xd0\x17\x48\x71\xe8\x53\x18\xa7\x66\xe9\xd0\x16\xc4\x36\x8f\
\xe0\x94\xc2\x37\x68\x11\x63\x28\x14\xa5\x71\x68\xf8\x7a\xef\x4d\
\x88\xa2\x6b\xcf\x76\xb9\xe7\xff\xe7\xde\x73\xce\xbd\xdc\xc5\xbd\
\x70\xd1\x64\xce\x92\x0b\xdf\xe9\x25\x89\x4d\x58\xf0\x48\xdf\xcf\
\x70\xef\x7e\x82\xb7\x1c\xda\x57\x19\x03\x31\x5e\xb1\xef\x50\xcb\
\x5c\xb8\x57\x8a\xf0\x57\x76\x3d\xb6\xac\xea\xd8\x20\x93\xc7\x78\
\xdb\xa6\x05\x6b\x2e\x9d\x65\xd7\x79\x2c\x68\x52\xb2\x6d\xd7\x03\
\xd0\x08\x15\x05\xbe\x01\xca\x56\x0d\x9d\x66\xa3\x3c\x14\x64\x3c\
\xf5\xce\x93\xe2\x64\x85\x22\x8b\xf1\x1b\x45\xc3\x7b\xdf\xf2\xa4\
\x06\x17\x3e\x45\xd7\xde\xd0\x56\x31\x89\xc3\xa9\xaf\x49\x0d\x32\
\xae\x7c\x54\x53\x0f\xd2\x1a\xaa\x8e\xcc\xdb\x4b\xf0\x8e\x1d\x5f\
\x8c\x2b\x54\xb4\x71\x7c\xd3\x3d\xeb\x41\xea\xc8\x01\x5e\x44\xcd\
\xee\x68\x39\xbf\x6d\x73\xb0\x35\x55\x91\x46\x82\x07\x6d\x24\x97\
\x31\x70\x9c\x5c\x64\x26\x1e\x09\x0a\xc5\x89\xea\x54\x45\xc7\x76\
\x8a\x27\x82\x42\x71\x64\x59\x2d\xc1\x0f\x6c\x4d\xe2\xd1\x28\xff\
\x5b\x24\x27\x68\xc2\x92\x3d\x2f\x27\x32\x6b\x6a\x4e\xc2\x01\x9f\
\x22\xf8\x43\x1f\xea\xe9\x1b\x49\x04\x7f\xd1\xc6\xba\x5a\xaa\x28\
\xcd\xc4\x47\xba\x3e\xab\x06\xd3\x32\xa1\x28\xcd\xc0\xf9\x60\x4b\
\xc7\xb3\xa8\xa9\x89\xe2\xf6\x2d\xcc\x6b\xdb\x8c\xf0\x9b\xbe\xff\
\x98\x98\x8b\xba\x8a\xae\xab\xf4\x35\x2e\x5a\x49\xf0\x96\x3e\xe8\
\x6b\xe9\x44\x7b\x2b\x16\x93\x1a\x64\x0c\x8c\xac\x29\x07\xf8\x39\
\xbd\x69\x03\x3e\xf4\x56\x57\xfc\xa1\xe4\xb2\x91\x9e\x4b\xab\xca\
\x92\x99\x4f\x14\x43\xaf\xb5\xfd\xea\xc5\x27\x20\x97\x5d\x3b\x33\
\xf4\x5c\xd7\x4e\x3c\xb4\x85\x62\xc9\x43\x6f\xb4\xc3\x7f\xf9\xbf\
\xbf\xf5\xbb\xe0\x37\xea\xe2\x9f\x3d\xa6\x0b\x48\xb0\x00\x00\x00\
\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\
\x00\x32\x30\x31\x35\x2d\x30\x34\x2d\x30\x38\x54\x32\x31\x3a\x32\
\x33\x3a\x31\x34\x2b\x30\x32\x3a\x30\x30\xb6\xc4\xc9\x4e\x00\x00\
\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\
\x79\x00\x32\x30\x31\x35\x2d\x30\x34\x2d\x30\x38\x54\x32\x31\x3a\
\x32\x33\x3a\x31\x31\x2b\x30\x32\x3a\x30\x30\x95\xa1\x5e\x55\x00\
\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\x00\
\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\
\x9b\xee\x3c\x1a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x05\x55\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x17\x08\x06\x00\x00\x00\xe0\x2a\xd4\xa0\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x06\
\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\
\x00\x07\x74\x49\x4d\x45\x07\xdf\x03\x10\x12\x11\x36\xbb\xc4\xe8\
\x82\x00\x00\x04\x59\x49\x44\x41\x54\x48\xc7\x9d\x95\xcb\x6f\x53\
\x47\x14\xc6\xbf\xb9\x73\x1f\x76\xd2\xf8\x91\xe0\x04\x12\xda\x34\
\xc1\xf8\x1a\x50\x5b\x92\x80\xc3\xae\x0f\x9b\xd0\x0d\xdd\x75\x41\
\xff\x09\x24\x9a\x0a\xa9\xca\xd6\xf9\x07\xca\x8a\xb2\xaa\xba\xea\
\xae\xb4\xa2\x2b\x4a\x55\xa9\x2d\x54\xb8\x48\xa1\x10\xc7\x8d\x8b\
\x9d\xd4\x09\x89\xec\x24\x76\xec\xeb\x7b\x6d\xcf\x9c\x2e\xfc\x4a\
\xa0\x28\xd0\x23\x8d\x74\x17\x33\xbf\xfb\x9d\xf3\x9d\x39\xc3\xf0\
\x12\xf1\xc5\xb5\x6b\x20\x22\x8f\xae\x6b\xef\x57\xca\x95\x3b\x60\
\xac\xf4\xe9\x95\x2b\x07\x9e\x53\x0e\xda\x10\x9f\x9f\xc7\xfa\xfa\
\xba\x5b\x55\xd5\xab\x2e\x97\xfb\x2b\x97\xdb\x7d\x75\x6b\x6b\xcb\
\x1d\x9f\x9f\x3f\x10\xce\x0f\x02\x3b\x8e\xe3\x1e\x1c\x1a\x9a\xab\
\x56\xab\xb3\x4f\x9e\x64\x7a\x0c\x43\x9f\xf6\xfa\x7c\xea\x6e\xa9\
\xf4\xdb\xcc\x85\x0b\x8d\x1f\x6f\xdf\x7e\xe1\x79\x76\x10\x38\x10\
\x08\xcc\xd9\xb6\x3d\xbb\x98\x5c\xd2\xf3\x5b\x5b\x18\xf0\xfb\x71\
\xf2\x44\xd8\xee\xed\xed\xfd\xce\xeb\xf5\xae\x68\x9a\xa6\x80\x08\
\x00\x40\x44\xb0\x1d\xe7\x1b\x4d\xd3\xee\x7e\x72\xe9\x12\xd4\x97\
\x05\x5b\xd5\x2a\x8e\x8f\x8f\x61\x35\xb7\x86\xc7\x8b\x49\xd7\xa9\
\x93\x27\x3e\x66\x8c\x81\x88\xc0\x55\xb5\xa3\x52\x08\xf1\x48\xe1\
\xfc\xee\x7f\xd6\xbc\x0d\x3e\xb4\x07\x5c\xb1\x2c\x9c\x9d\x9c\x40\
\x2c\x1a\xc5\xf4\x99\x29\x94\x2b\x16\x1e\x3d\x5e\xc4\xee\xee\x2e\
\xf2\xf9\x3c\xea\xf5\x3a\xfc\xfd\xfd\x50\x35\x3d\x5d\xb6\xaa\xbf\
\x94\x76\xcb\x00\xb0\x5f\x79\x3c\x1e\x87\x6d\xdb\xee\x40\x20\x30\
\xe7\xec\x01\x47\xa6\x26\x11\x8b\xc5\x70\xec\xd8\x31\x0c\x0f\x0f\
\x03\x00\xee\xdd\x4f\x20\xb9\xb4\x84\xb0\x69\xc2\x65\x18\x38\x3a\
\x32\x02\x21\xe5\xd6\xfd\x07\x0f\xd6\x0c\x5d\xdf\x0f\x8f\xc7\xe3\
\xb0\x1d\xc7\x3d\x18\x08\xcc\xd9\x8e\xf3\x1c\x38\x1c\x0e\x43\xd3\
\x34\x84\xc3\xe1\x8e\x98\xdf\x13\x7f\x20\xb9\x94\xc2\x09\x33\x84\
\x74\x3a\x0d\xff\xc0\x00\x0c\x5d\x07\xb5\x3c\x50\xdb\x60\xe7\x00\
\xb0\xaa\xaa\x20\x22\xa8\xaa\xfa\xcc\x0f\x1e\x60\x71\x29\x05\xc6\
\x18\x4a\xa5\x92\xbe\xbd\xb3\x63\x80\x35\x1d\xe0\x6d\x70\x60\x0f\
\xb8\x6c\x59\x98\x7e\x06\xbc\xef\x72\x28\x0a\xfa\xfb\xfb\xe1\xf1\
\x78\xe0\xd8\x55\x64\x56\x56\x51\x2c\x16\xe1\xf1\xf4\xf5\xf7\xf6\
\xf4\xd0\x6e\xb1\xf8\xeb\xcc\xcc\x4c\x83\x9f\x8d\x44\x7a\x87\x86\
\x86\x3e\x77\x6a\xb5\xcf\xda\xe0\xc8\xe4\x24\xce\x9f\x8f\xc1\x34\
\x4d\xe8\xba\x0e\x45\x51\xc0\x18\xdb\xb7\x38\x57\xe0\xf3\xf9\xe1\
\xf5\x7a\x61\xdb\x55\x64\x56\x56\x50\x2a\x95\xb8\xdf\xef\x9d\xee\
\xeb\xf3\xf0\x7c\xa1\x70\x4f\xad\xd7\xeb\x97\x0b\xf9\xfc\x6c\xee\
\xe9\x86\x5e\x2a\x97\x71\xee\xcc\x14\x62\xb1\x28\x4c\xd3\x84\xa6\
\x69\xc8\xae\x6d\x20\xbd\xb2\x01\xb0\x6e\x63\x11\x11\xfc\x9e\x1e\
\xbc\x1d\x1a\x85\x69\x9a\x20\x22\x10\x51\xd3\xe4\xd4\xb2\x3e\x72\
\x78\x68\xb6\x5e\xaf\x5b\xaa\xe3\x38\x85\x8d\x8d\xcd\xe4\xe6\xf6\
\xce\x91\xd0\xf8\x98\x3f\x1a\x8d\x6a\xa1\x50\x08\x9c\x73\x30\x10\
\x7e\x4a\xa4\x91\x58\xce\xc3\x1c\x3f\x84\x3f\x73\x0e\x4e\x0d\xeb\
\x58\xcc\x56\x61\x59\x16\xae\x5d\x1e\x86\xcb\xd0\x11\x0a\x85\x20\
\xa5\x44\x6a\x39\x5d\xff\x3b\x93\xdd\xb6\xad\xca\xba\xcf\xe7\x2b\
\x28\xb5\x5a\xed\xc6\x6a\x2e\x17\x7d\x92\x4e\xbf\xf7\xe1\x85\x99\
\x9b\xc1\x60\x10\x9c\x73\x48\x29\x21\xa5\x84\x66\xf4\x22\x30\x32\
\x8e\xd7\x4f\x9f\x84\x27\xf2\x0e\x46\xa6\x4e\xc1\x3f\x66\xc2\xf5\
\xda\x61\x10\x08\x52\x4a\x70\xce\x11\x0c\x06\x71\x3e\xfa\xc1\xd7\
\xa9\xe4\xe2\xbb\xab\xb9\x5c\xb4\x5e\xab\xdd\x50\xbf\xbc\x7e\x5d\
\x7c\x7b\xf3\x66\xe1\xa3\x8b\x17\x0b\x99\x4c\x66\x5d\x51\x14\x08\
\x21\x3a\x73\x8d\x31\x8e\x85\x35\x81\xdc\x3a\x21\x03\x8e\xb5\x0d\
\xc2\x66\x9e\x10\xe4\x2a\xa4\x6c\xc2\xa9\x65\xf2\xe4\xc4\x44\x36\
\x93\x5e\x4e\x72\xce\xf1\xf3\x9d\x3b\xcd\x56\x0c\x0c\x0e\x02\x00\
\x0c\xc3\x80\x90\x12\x8c\xb5\x2f\x33\x01\x4c\xc1\xd3\x52\x1d\xff\
\x6c\x0a\xec\xe8\x40\x46\x10\xd4\xa2\xc4\x71\x77\x33\x3b\x21\x04\
\x08\x00\x49\x09\x97\xcb\xb5\x47\x58\xab\xcf\x15\x45\xe9\x18\x25\
\x84\xe8\xc2\x89\x41\x12\xa1\x92\xaf\xa1\xf1\xb0\x08\xae\x31\x30\
\x09\x28\xdb\x36\xf0\x46\x53\x75\x43\x74\x4d\xa6\xe6\x6c\xd9\x0f\
\x6f\x87\x24\x82\x14\x02\xd8\x03\xf7\xf5\x19\x38\x3d\x5e\x83\xc2\
\x6b\xdd\x7d\x6e\xc2\xe8\x11\xa3\xa9\x1c\xdd\x89\x48\x52\xa2\xf1\
\x22\x38\x49\xd9\x2d\x0b\x11\xa4\x04\xa6\x82\x03\x78\x6b\xec\xd0\
\x73\x93\x53\x61\x04\x46\x04\x29\x64\xbb\x80\x5d\x71\x1d\xc7\x00\
\x9c\x8b\x44\x18\x63\x4c\xad\xda\xb6\x46\xd4\x4c\x57\x12\x41\x4a\
\x82\x0a\x81\x1e\xb5\xf1\xdc\x72\x71\x01\x49\xad\x7d\xad\x3e\xb7\
\x2c\x4b\xf3\x7a\xbd\x4a\xfb\x9d\x50\xf7\x64\x30\x9a\x4a\xa5\x8e\
\x4e\x4c\x4c\x74\x06\xcf\xab\xc6\xc2\xc2\xc2\x9b\x00\x8e\x02\x58\
\x03\xd0\x68\xc3\x05\x80\xca\x0f\xb7\x6e\x7d\x9f\xcd\x66\x1f\x52\
\x33\x18\x5a\x26\xb5\x6a\xca\x5a\x1f\x9d\x32\xb4\xa2\xdd\x5a\x2c\
\x91\x48\xfc\x05\xc0\x01\x20\xff\x97\xba\x57\x89\x7f\x01\x57\xb7\
\x71\x84\x11\xeb\x8a\xff\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\
\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x35\x2d\x30\
\x33\x2d\x31\x36\x54\x31\x38\x3a\x32\x32\x3a\x30\x33\x2b\x30\x31\
\x3a\x30\x30\x54\x50\xaa\x1e\x00\x00\x00\x25\x74\x45\x58\x74\x64\
\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x35\x2d\
\x30\x33\x2d\x31\x36\x54\x31\x38\x3a\x31\x37\x3a\x35\x34\x2b\x30\
\x31\x3a\x30\x30\xd7\x81\x8f\x0d\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x02\x2c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x04\x08\x19\x1c\xd4\x4c\x91\x22\x00\x00\x01\xb9\x49\x44\
\x41\x54\x58\xc3\xd5\x97\x31\x6b\x14\x41\x18\x86\x9f\xdb\x3b\x89\
\x69\x02\xf1\x0f\x28\x24\x21\x7d\xda\x0b\x06\x82\x10\xec\x44\x24\
\x62\x29\xe1\x85\x10\x92\x23\x44\xb0\xd0\x3a\x65\x08\x62\x30\xc2\
\xfb\x07\x84\x20\x8a\x90\x26\x85\x92\x56\xb0\xb4\xb3\xc9\x3f\x08\
\x29\xc4\x24\xc8\xd9\xcc\xc9\xb2\xdc\xed\xed\xdd\xec\xde\x91\x17\
\x16\x96\x9d\xd9\x79\xde\xfd\x66\xf6\xfb\x66\x6a\x00\x92\x9e\x00\
\x2f\x80\x59\x20\x21\x5e\x6b\xb6\x3f\x17\xe9\xd8\x90\xf4\x18\x38\
\xa2\x5c\xdd\x2e\xda\x31\x01\x5e\x53\xbe\xea\x79\x8d\x92\x26\xff\
\x47\x00\xb8\x97\x6a\xfb\x01\x7c\xed\x37\x40\x1f\xd5\x80\x9f\x39\
\xf0\xfb\xc0\x5b\x49\xdb\xb6\xbf\x35\xc2\x0b\x1d\x9d\xd8\x7e\x45\
\x45\x92\xd4\x04\x3e\x01\x77\x80\x8f\x92\x9e\x25\x40\x3b\xd5\x67\
\xa2\x62\xf8\x97\x00\x07\x98\x06\x36\x13\x46\xa0\x2e\x70\x80\x53\
\xe0\x79\x63\x8c\xf0\x47\xb6\xcf\x93\x71\xc2\x29\x29\xe9\x0c\x0d\
\xaf\xcc\x40\x51\x78\x25\x06\x06\x81\x17\x32\x20\x69\x4e\x52\xbd\
\x0a\x78\x5f\x03\x92\x96\x80\xef\xc0\x41\x15\xf0\x4e\x2a\xee\x35\
\xe0\x43\xe0\x03\x30\x05\xac\x4b\xba\xb6\xdd\x8a\x85\x4b\xda\x02\
\x9e\x86\x04\x78\x91\x17\x81\xbb\x01\xde\xd1\x96\xa4\x37\x25\x7c\
\xf9\x3c\xd0\x04\x16\x81\xe5\x9e\x06\x6c\xbf\x07\x36\x32\x8f\x5b\
\x69\x13\x43\x86\xfd\x2a\x75\xff\x3b\x37\x13\xda\x3e\x94\x04\xf0\
\x2e\x63\xe2\x0f\x70\x9c\x2a\x2c\x85\xe7\x7c\xe0\xbf\xc0\xf6\x61\
\x97\x48\xbc\x0c\x65\x3b\x0a\x5e\x38\x0f\xf4\x30\x51\x8f\x85\x0f\
\x94\x88\x7a\x98\x88\x82\x0f\x9c\x09\xbb\x98\x88\x82\x0f\x95\x8a\
\x83\x89\x9d\xb0\x06\xa2\xe0\x43\xd7\x02\xdb\xfb\xc0\x83\x58\x78\
\x54\x31\xb2\xdd\x2e\xa3\x78\x8d\x64\x4b\x76\xa3\x0c\x5c\x8e\x80\
\x79\x99\xad\x86\xe9\x73\xc1\x8a\xa4\x5b\x91\x07\x93\x3c\xfd\x05\
\x96\xb3\x06\xce\xc2\x1e\x1d\x60\x21\x5c\xa3\xd2\x59\x02\xec\x8e\
\x71\x09\xec\xd6\x42\x59\x5d\x0d\xc7\xf3\x99\xcc\x94\x54\xa1\x36\
\xf0\x0b\xd8\xb3\x7d\xf4\x0f\x6d\x10\xbc\x65\x23\xf8\xb9\x8d\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x0b\
\x0b\x66\xc4\x87\
\x00\x75\
\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0c\xad\x0f\x07\
\x00\x64\
\x00\x65\x00\x6c\x00\x65\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0b\x9e\x84\x87\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x01\
\x00\x00\x00\x36\x00\x00\x00\x00\x00\x01\x00\x00\x04\x24\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x09\x7d\
\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x01\x00\x00\x01\x3f\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
ctu-yfsg/2015-c-qgis-vfr
|
resources_rc.py
|
Python
|
gpl-2.0
| 13,627
|
from temboo.Library.RunKeeper.FitnessActivities.DeleteActivity import DeleteActivity, DeleteActivityInputSet, DeleteActivityResultSet, DeleteActivityChoreographyExecution
from temboo.Library.RunKeeper.FitnessActivities.RecordActivity import RecordActivity, RecordActivityInputSet, RecordActivityResultSet, RecordActivityChoreographyExecution
from temboo.Library.RunKeeper.FitnessActivities.RetrieveActivites import RetrieveActivites, RetrieveActivitesInputSet, RetrieveActivitesResultSet, RetrieveActivitesChoreographyExecution
from temboo.Library.RunKeeper.FitnessActivities.RetrieveActivity import RetrieveActivity, RetrieveActivityInputSet, RetrieveActivityResultSet, RetrieveActivityChoreographyExecution
from temboo.Library.RunKeeper.FitnessActivities.UpdateActivity import UpdateActivity, UpdateActivityInputSet, UpdateActivityResultSet, UpdateActivityChoreographyExecution
|
willprice/arduino-sphere-project
|
scripts/example_direction_finder/temboo/Library/RunKeeper/FitnessActivities/__init__.py
|
Python
|
gpl-2.0
| 880
|
class Stack():
    def __init__(self, arg=None):
        # Avoid a mutable default argument; a default list would be shared
        # across all Stack instances.
        self.Q1 = arg if arg is not None else []
self.Q2 = []
    def stack_empty(self):
        return len(self.Q1) == 0 and len(self.Q2) == 0
def push(self, x):
        if self.stack_empty():
self.Q1.append(x)
elif len(self.Q1) != 0:
self.Q1.append(x)
else:
self.Q2.append(x)
def pop(self):
val = None
        if self.stack_empty():
print "underflow"
elif len(self.Q1) != 0:
while len(self.Q1) != 0:
if len(self.Q1) == 1:
val = self.Q1[0]
else:
self.Q2.append(self.Q1[0])
self.Q1.remove(self.Q1[0])
else:
while len(self.Q2) != 0:
if len(self.Q2) == 1:
val = self.Q2[0]
else:
self.Q1.append(self.Q2[0])
self.Q2.remove(self.Q2[0])
return val
def show_stack(self):
print "Queue 1 :", self.Q1
print "Queue 2 :", self.Q2
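# Design note: push is O(1), but pop is O(n) because every element except the
# last migrates to the other queue before the last one is returned.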
#-----------------------
input_num = ['a', 'b', 'c']
stk = Stack(input_num)
stk.pop()
stk.show_stack()
stk.pop()
stk.show_stack()
stk.push('d')
stk.show_stack()
stk.pop()
stk.show_stack()
|
jasonleaster/Algorithm
|
Stack/Python_version/stack_by_two_queue.py
|
Python
|
gpl-2.0
| 1,379
|
def max_sum_subarray(nums):
    currSum, currMin, currMax = 0, 0, -2**31
for num in nums:
currSum += num
currMax = max(currMax, currSum-currMin)
currMin = min(currMin, currSum)
return currMax
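# The loop maintains prefix sums: currSum is the running prefix sum, currMin
# the smallest prefix seen so far, and the best subarray ending at the current
# element is currSum - currMin. For the demo input below the answer is 6,
# from the subarray [4, -1, 2, 1].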
if __name__ == '__main__':
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print max_sum_subarray(nums)
|
starcroce/PyAlgoDataStructure
|
array/maxim_subarray.py
|
Python
|
gpl-2.0
| 330
|
# -*- mode: python -*-
# -*- coding: iso8859-15 -*-
##############################################################################
#
# IUT student records management (gestion scolarite IUT)
#
# Copyright (c) 2001 - 2013 Emmanuel Viennet. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Emmanuel Viennet emmanuel.viennet@gmail.com
#
##############################################################################
"""Report card generation: a minimal example for programmers
"""
# Some useful ScoDoc modules:
from sco_pdf import *
import sco_preferences
from notes_log import log
import sco_bulletins_generator
import sco_bulletins_standard
class BulletinGeneratorExample(sco_bulletins_standard.BulletinGeneratorStandard):
    """A simple example of a grade report (bulletin de notes), PDF version only.
    Starts from the standard report and redefines the central part.
    """
    description = 'exemple (ne pas utiliser)' # the description must be short: it appears in the configuration menu
    supported_formats = [ 'pdf' ] # this generator can only produce PDF (the web version remains ScoDoc's standard one)
    # In general, we want to define a special table format without changing the rest (title, footer).
    # To change the rest, override these methods:
    #  .bul_title_pdf(self) : top part of the report
    #  .bul_part_below(self, format='') : information below the table
    #  .bul_signatures_pdf(self) : signatures
    def bul_table(self, format=''):
        """Defines the central part of our PDF report.
        Must return a list of PLATYPUS objects.
        """
        assert format == 'pdf' # safety check
return [
Paragraph( SU("L'étudiant %(nomprenom)s a une moyenne générale de %(moy_gen)s" % self.infos),
self.CellStyle # un style pdf standard
)
]
# Register your class with ScoDoc:
sco_bulletins_generator.register_bulletin_class(BulletinGeneratorExample)
|
denys-duchier/Scolar
|
sco_bulletins_example.py
|
Python
|
gpl-2.0
| 2,668
|
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Run BioSQL tests using MySQL"""
import unittest  # used by the test runner below; imported explicitly rather than via the star import
from Bio import MissingExternalDependencyError
from BioSQL import BioSeqDatabase
from common_BioSQL import *
##################################
# Start of user-editable section #
##################################
# Constants for the database driver
DBHOST = 'localhost'
DBUSER = 'root'
DBPASSWD = ''
TESTDB = 'biosql_test'
################################
# End of user-editable section #
################################
DBDRIVER = 'MySQLdb'
DBTYPE = 'mysql'
# This will abort if driver not installed etc:
check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
# Some of the unit tests don't create their own database,
# so just in case there is no database already:
create_database()
if __name__ == "__main__":
# Run the test cases
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_BioSQL_MySQLdb.py
|
Python
|
gpl-2.0
| 1,071
|
# Author: Hubert Kario, (c) 2016
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Test for CVE-2015-7575 (SLOTH)"""
from __future__ import print_function
import traceback
import sys
import getopt
import re
from itertools import chain
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, \
CertificateGenerator, CertificateVerifyGenerator, \
AlertGenerator
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectCertificateRequest, \
ExpectApplicationData
from tlslite.extensions import SignatureAlgorithmsExtension, \
SignatureAlgorithmsCertExtension
from tlslite.constants import CipherSuite, AlertDescription, \
HashAlgorithm, SignatureAlgorithm, ExtensionType
from tlslite.utils.keyfactory import parsePEMKey
from tlslite.x509 import X509
from tlslite.x509certchain import X509CertChain
from tlsfuzzer.helpers import RSA_SIG_ALL
def natural_sort_keys(s, _nsre=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
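# e.g. natural_sort_keys('check sha384 w/sha256 PRF') returns
# ['check sha', 384, ' w/sha', 256, ' prf'], so 'sha9' sorts before 'sha10'.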
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" probe-name if present, will run only the probes with given")
print(" names and not all of them, e.g \"sanity\"")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -k keyfile file with private key")
print(" -c certfile file with certificate of client")
print(" --help this message")
def main():
"""check if obsolete signature algorithm is rejected by server"""
conversations = {}
hostname = "localhost"
port = 4433
run_exclude = set()
private_key = None
cert = None
argv = sys.argv[1:]
opts, argv = getopt.getopt(argv, "h:p:e:k:c:", ["help"])
for opt, arg in opts:
if opt == '-k':
text_key = open(arg, 'rb').read()
if sys.version_info[0] >= 3:
text_key = str(text_key, 'utf-8')
private_key = parsePEMKey(text_key, private=True)
elif opt == '-c':
text_cert = open(arg, 'rb').read()
if sys.version_info[0] >= 3:
text_cert = str(text_cert, 'utf-8')
cert = X509()
cert.parse(text_cert)
elif opt == '-h':
            # Connect() below uses 'hostname', so store the -h argument there.
            hostname = arg
elif opt == '-p':
port = int(arg)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
else:
raise ValueError("Unknown option: {0}".format(opt))
if argv:
run_only = set(argv)
else:
run_only = None
if not private_key:
raise ValueError("Specify private key file using -k")
if not cert:
raise ValueError("Specify certificate file using -c")
conversation = Connect(hostname, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
ext = {ExtensionType.signature_algorithms :
SignatureAlgorithmsExtension().create([
(getattr(HashAlgorithm, x),
SignatureAlgorithm.rsa) for x in ['sha512', 'sha384', 'sha256',
'sha224', 'sha1', 'md5']]),
ExtensionType.signature_algorithms_cert :
SignatureAlgorithmsCertExtension().create(RSA_SIG_ALL)}
node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
node = node.add_child(ExpectServerHello(version=(3, 3)))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectCertificateRequest())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(CertificateGenerator(X509CertChain([cert])))
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(CertificateVerifyGenerator(private_key))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(b"GET / HTTP/1.0\n\n"))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertDescription.close_notify))
node = node.add_child(ExpectClose())
node.next_sibling = ExpectAlert()
node.next_sibling.add_child(ExpectClose())
conversations["sanity"] = conversation
for prf in ['sha256', 'sha384']:
for md in ['sha1', 'sha256', 'sha384', 'sha512']:
conversation = Connect(hostname, port)
node = conversation
if prf == 'sha256':
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
else:
ciphers = [CipherSuite.TLS_RSA_WITH_AES_256_GCM_SHA384,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
ext = {ExtensionType.signature_algorithms :
SignatureAlgorithmsExtension().create([
(getattr(HashAlgorithm, x),
SignatureAlgorithm.rsa) for x in ['sha512', 'sha384', 'sha256',
'sha224', 'sha1', 'md5']]),
ExtensionType.signature_algorithms_cert :
SignatureAlgorithmsCertExtension().create(RSA_SIG_ALL)}
node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
node = node.add_child(ExpectServerHello(version=(3, 3)))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectCertificateRequest())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(CertificateGenerator(X509CertChain([cert])))
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(CertificateVerifyGenerator(
private_key, msg_alg=(getattr(HashAlgorithm, md), SignatureAlgorithm.rsa)))
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(b"GET / HTTP/1.0\n\n"))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertDescription.close_notify))
node = node.add_child(ExpectClose())
node.next_sibling = ExpectAlert()
node.next_sibling.add_child(ExpectClose())
conversations["check {0} w/{1} PRF".format(md, prf)] = \
conversation
# run the conversation
good = 0
bad = 0
failed = []
print("Certificate Verify test version 4")
sanity_test = ('sanity', conversations['sanity'])
ordered_tests = chain([sanity_test],
filter(lambda x: x[0] != 'sanity',
conversations.items()),
[sanity_test])
for c_name, c_test in ordered_tests:
if run_only and c_name not in run_only or c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(c_test)
res = True
#because we don't want to abort the testing and we are reporting
#the errors to the user, using a bare except is OK
#pylint: disable=bare-except
try:
runner.run()
except:
print("Error while processing")
print(traceback.format_exc())
res = False
#pylint: enable=bare-except
if res:
good += 1
print("OK\n")
else:
bad += 1
failed.append(c_name)
print("Test end")
print("successful: {0}".format(good))
print("failed: {0}".format(bad))
failed_sorted = sorted(failed, key=natural_sort_keys)
print(" {0}".format('\n '.join(repr(i) for i in failed_sorted)))
if bad > 0:
sys.exit(1)
if __name__ == "__main__":
main()
|
mildass/tlsfuzzer
|
scripts/test-rsa-sigs-on-certificate-verify.py
|
Python
|
gpl-2.0
| 8,772
|
##
# Copyright 2015-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing pbdMPI, implemented as an easyblock
@author: Ewan Higgs (Ghent University)
@author: Peter Maxwell (University of Auckland)
"""
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.rpackage import RPackage
class EB_pbdMPI(RPackage):
"""Support for building/installing pbdMPI."""
def configure_step(self):
"""Configure Step of build process for pbdMPI."""
mpi_types = {
toolchain.INTELMPI: 'INTELMPI',
toolchain.MPI_TYPE_MPICH: 'MPICH',
toolchain.MPI_TYPE_OPENMPI: 'OPENMPI',
}
mpi_type = mpi_types[self.toolchain.mpi_family()]
self.configureargs.extend([
"--with-mpi-include=%s" % self.toolchain.get_variable('MPI_INC_DIR'),
"--with-mpi-libpath=%s" % self.toolchain.get_variable('MPI_LIB_DIR'),
"--with-mpi=%s" % self.toolchain.get_software_root(self.toolchain.MPI_MODULE_NAME)[0],
"--with-mpi-type=%s" % mpi_type,
])
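        # With an OpenMPI-based toolchain, for instance, this contributes
        # options like --with-mpi-type=OPENMPI plus the MPI include, library
        # and root paths exported by the toolchain (illustrative; the actual
        # paths come from the loaded MPI module).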
super(EB_pbdMPI, self).configure_step()
def run(self):
"""Configure before installing pbdMPI as an extension."""
self.configure_step()
super(EB_pbdMPI, self).run()
|
pescobar/easybuild-easyblocks
|
easybuild/easyblocks/p/pbdmpi.py
|
Python
|
gpl-2.0
| 2,289
|
from gettext import gettext as _
from django.http import HttpResponse, HttpResponseRedirect
from django import forms
from django.shortcuts import render_to_response
import sys
sys.path.append('../../../')
import re
import gourmet.backends.db
import gourmet.shopping
import gourmet.recipeManager
import json
from django.shortcuts import render
class MultiplierForm (forms.Form):
yields = forms.FloatField(label='New Yield',min_value=0,required=False)
multiplier = None
#multiplier = forms.FloatField(label='x',min_value=0,required=False)
class NoYieldsMultiplierForm (forms.Form):
multiplier = forms.FloatField(label='x',min_value=0,required=False)
yields = None
class SearchForm (forms.Form):
choices = {str(_('anywhere')):'anywhere',
str(_('title')):'title',
str(_('ingredient')):'ingredient',
str(_('instructions')):'instructions',
str(_('notes')):'modifications',
str(_('category')):'category',
str(_('cuisine')):'cuisine',
str(_('source')):'source',}
search_field = forms.CharField(max_length=100)
regexp_field = forms.BooleanField(label='Use regexp')
choice_field = forms.ChoiceField(label='Search in...',
initial='anywhere',
choices=list(choices.items())
)
rd = gourmet.backends.db.get_database()
class MyShoppingList (gourmet.shopping.ShoppingList):
def get_shopper (self, lst):
return gourmet.recipeManager.DatabaseShopper(lst, rd)
slist = MyShoppingList()
def list_recs (view, default_search_values={},
template='index.html'):
sf = SearchForm()
for k,v in list(default_search_values.items()):
print('Set',k,'to',v)
sf.fields[k].initial = v
return render_to_response(
template,
{'recs':[(rec,rd.get_cats(rec)) for rec in view],
'form':sf
}
)
def index (request):
return list_recs(rd.fetch_all(rd.recipe_table,deleted=False))
def sort (request, field):
return list_recs(rd.fetch_all(rd.recipe_table,deleted=False,sort_by=[(field,1)]))
def do_search_xhr (request):
if request.method == 'POST':
form = SearchForm(request.POST)
print('Searching ',form.data['search_field'])
return search(request,form.data['search_field'],template='list.html')
else:
print('Not a post!')
def do_search (request):
if request.method == 'POST':
form = SearchForm(request.POST)
print('Searching ',form.data['search_field'])
return search(request,form.data['search_field'])
else:
print('Not a post!')
def about(request):
return render(request, 'about.html')
def search (request, term, template='index.html'):
vw = rd.search_recipes(
[{'column':'deleted','operator':'=','search':False},
{'column':'anywhere',
'operator':'LIKE',
'search':'%'+term.replace('%','%%'+'%')+'%',
}
]
)
print('We got ',len(vw),'for "%s"'%term)
return list_recs(vw, default_search_values={
'search_field':term,
'regexp_field':False,
'choice_field':'anywhere',
},
template=template
)
def get_ings(rec_id, mult):
    ings = rd.order_ings(rd.get_ings(rec_id))
    formatted_ings = []
    for g, items in ings:
        formatted_items = []
        for item in items:
            strings = []
            amt, unit = rd.get_amount_and_unit(item, mult=mult)
            if amt:
                strings.append(amt)
            if unit:
                strings.append(unit)
            strings.append(item.item)
            if item.optional:
                strings.append(' (optional)')
            formatted_items.append(' '.join(strings))
        formatted_ings.append((g, formatted_items))
    return formatted_ings
def rec(request, rec_id, mult=1):
    mult = float(mult)
    rec = rd.get_rec(rec_id)
    formatted_ings = get_ings(rec_id, mult)

    def textify(t):
        if not t:
            return ''
        print('textifying "%s"' % t)
        return re.sub('\n', '<br>',
                      re.sub('\n\n+', '</p><p>', '<p>%s</p>' % t.strip()))

    if rec.yields:
        print('WITH YIELDS')
        mf = MultiplierForm()
    else:
        print('WITHOUT YIELDS')
        mf = NoYieldsMultiplierForm()
    return render_to_response(
        'rec.html',
        {'rd': rd,
         'r': rec,
         'ings': formatted_ings,
         'cats': ', '.join(rd.get_cats(rec)),
         'instructions': textify(rec.instructions),
         'notes': textify(rec.modifications),
         'mult': mult,
         'yields': (rec.yields and rec.yields * mult or None),
         'is_adjusted': (mult != 1),
         'multiplier_form': mf,
         }
    )
def multiply_rec_xhr(request):
    return multiply_rec(request, xhr=True)


def multiply_rec(request, xhr=None):
    # We can't do yields and multiplier in the same place!
    print('MULTIPLY!')
    if request.method == 'POST':
        form = MultiplierForm(request.POST)
        if form.is_valid():
            recid = request.POST.get('rid', None)
            try:
                multiplier = form.cleaned_data['multiplier']
            except KeyError:
                # No multiplier was posted, so derive one from the requested
                # yield and the recipe's stored yield.
                yields = form.cleaned_data['yields']
                orig_yields = rd.get_rec(recid).yields
                multiplier = yields / float(orig_yields)
            if xhr:
                rec = rd.get_rec(recid)
                d = {'yields': rec.yields * multiplier,
                     'ingredients': get_ings(recid, multiplier),
                     'multiplier': multiplier}
                return HttpResponse(
                    json.dumps(d),
                    content_type='application/javascript'
                )
            else:
                return HttpResponseRedirect('/rec/%s/%s' % (recid, multiplier))
    # Invalid form or non-POST request: send the user back to the index
    # instead of returning None.
    return HttpResponseRedirect('/')
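
# Worked example of the yields -> multiplier conversion above: if the
# stored yield of a recipe is 4 servings and the user posts yields=6,
# then multiplier = 6 / 4.0 = 1.5, and both the displayed yield and
# every ingredient amount from get_ings(recid, 1.5) are scaled by 1.5.
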
def shop(request, rec_id=None, mult=1):
    mult = float(mult)
    if rec_id is not None:
        slist.addRec(rd.get_rec(rec_id), mult)
    recs = list(slist.recs.values())
    data, pantry = slist.organize_list(slist.lst)
    #recs = [('foo',4),]
    #data = [('sugar','3 cups'),]
    #pantry = [('sugar','3 cups'),]
    return render_to_response('shop.html', {'data': data,
                                            'pantry': pantry,
                                            'recs': recs})
def shop_remove(request, rec_id=None):
    try:
        rec_id = int(rec_id)
        if rec_id in slist.recs:
            del slist.recs[rec_id]
        else:
            print('Odd, there is no ', rec_id, 'on the shopping list')
    except TypeError:
        print('Odd, rec_id', rec_id, 'is the wrong type')
        raise
    return shop(request)
def shop_to_pantry(request):
    if request.method == 'POST':
        for item in request.POST:
            if item != 'submit':
                slist.sh.add_to_pantry(item)
    # Redirect unconditionally so non-POST requests also get a response.
    return HttpResponseRedirect('/shop/')


def shop_to_list(request):
    if request.method == 'POST':
        for item in request.POST:
            if item != 'submit':
                slist.sh.remove_from_pantry(item)
    return HttpResponseRedirect('/shop/')
def thumb(request, rec_id):
    return HttpResponse(rd.get_rec(rec_id).thumb,
                        content_type='image/jpeg')


def img(request, rec_id):
    return HttpResponse(rd.get_rec(rec_id).image,
                        content_type='image/jpeg')
|
thinkle/gourmet
|
gourmet/plugins/web_plugin/gourmetweb/recview/views.py
|
Python
|
gpl-2.0
| 7,488
|
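The redirects in the views above ('/rec/%s/%s', '/shop/') imply a URL layout, but the project's URLconf is not part of this file. Below is a minimal sketch of how these views might be wired up with old-style Django URL patterns; every regex and route is an assumption inferred from the view signatures:

# Hypothetical urls.py for the recview views; the patterns are guesses
# based on redirects like '/rec/%s/%s' and '/shop/' in views.py.
from django.conf.urls import url
from recview import views

urlpatterns = [
    url(r'^$', views.index),
    url(r'^sort/(?P<field>\w+)/$', views.sort),
    url(r'^search/$', views.do_search),
    url(r'^search_xhr/$', views.do_search_xhr),
    url(r'^rec/(?P<rec_id>\d+)/$', views.rec),
    url(r'^rec/(?P<rec_id>\d+)/(?P<mult>[\d.]+)/$', views.rec),
    url(r'^multiply/$', views.multiply_rec),
    url(r'^multiply_xhr/$', views.multiply_rec_xhr),
    url(r'^shop/$', views.shop),
    url(r'^shop/add/(?P<rec_id>\d+)/$', views.shop),
    url(r'^shop/remove/(?P<rec_id>\d+)/$', views.shop_remove),
    url(r'^thumb/(?P<rec_id>\d+)/$', views.thumb),
    url(r'^img/(?P<rec_id>\d+)/$', views.img),
]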
#
# Copyright (C) 2013-2016 Fabian Gieseke <fabian.gieseke@di.ku.dk>
# License: GPL v2
#
import os
import sys
import numpy
TIMING = 1
WORKGROUP_SIZE_BRUTE = 256
WORKGROUP_SIZE_LEAVES = 32
WORKGROUP_SIZE_UPDATE = 16
WORKGROUP_SIZE_COPY_INIT = 32
WORKGROUP_SIZE_COMBINE = 64
WORKGROUP_SIZE_TEST_SUBSET = 32
WORKGROUP_SIZE_COPY_DISTS_INDICES = 32
FILES_TO_BE_COMPILED = [
    "neighbors/buffer_kdtree/base.c",
    "neighbors/buffer_kdtree/cpu.c",
    "neighbors/buffer_kdtree/gpu_opencl.c",
    "neighbors/buffer_kdtree/util.c",
    "neighbors/buffer_kdtree/kdtree.c",
    "timing.c",
    "util.c",
    "opencl.c",
]
DIRS_TO_BE_INCLUDED = ["neighbors/buffer_kdtree/include"]
# paths
SOURCES_RELATIVE_PATH = "../../src/"
current_path = os.path.dirname(os.path.abspath(__file__))
sources_abs_path = os.path.abspath(os.path.join(current_path, SOURCES_RELATIVE_PATH))
# source files
source_files = [os.path.abspath(os.path.join(sources_abs_path, x)) for x in FILES_TO_BE_COMPILED]
include_paths = [os.path.abspath(os.path.join(sources_abs_path, x)) for x in DIRS_TO_BE_INCLUDED]
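
# Illustration of the path rebasing above: with this file at
# .../bufferkdtree/neighbors/buffer_kdtree/setup.py, sources_abs_path
# resolves to .../bufferkdtree/src, and every FILES_TO_BE_COMPILED entry
# is turned into an absolute path under that directory.
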
# Older numpy releases exposed the include directory via
# get_numpy_include() instead of get_include(), hence the fallback.
try:
    numpy_include = numpy.get_include()
except AttributeError:
    numpy_include = numpy.get_numpy_include()

# SWIG needs -py3 to generate Python 3 compatible wrapper code.
if sys.version_info >= (3, 0):
    swig_opts = ['-modern', '-threads', '-py3']
else:
    swig_opts = ['-modern', '-threads']
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('neighbors/buffer_kdtree', parent_package, top_path)

    # CPU + FLOAT
    config.add_extension("_wrapper_cpu_float",
                         sources=["swig/cpu_float.i"] + source_files,
                         swig_opts=swig_opts,
                         # include_paths is already a list of directories
                         include_dirs=[numpy_include] + include_paths,
                         define_macros=[
                             ('SOURCE_PATH', os.path.join(SOURCES_RELATIVE_PATH, "neighbors/buffer_kdtree")),
                             ('USE_DOUBLE', 0),
                             ('TIMING', TIMING),
                         ],
                         libraries=['OpenCL', 'gomp', 'm'],
                         extra_compile_args=["-fopenmp", '-O3', '-w'] + ['-I' + ipath for ipath in include_paths])
    # CPU + DOUBLE
    config.add_extension("_wrapper_cpu_double",
                         sources=["swig/cpu_double.i"] + source_files,
                         swig_opts=swig_opts,
                         include_dirs=[numpy_include] + include_paths,
                         define_macros=[
                             ('SOURCE_PATH', os.path.join(SOURCES_RELATIVE_PATH, "neighbors/buffer_kdtree")),
                             ('USE_DOUBLE', 1),
                             ('TIMING', TIMING),
                         ],
                         libraries=['OpenCL', 'gomp', 'm'],
                         extra_compile_args=["-fopenmp", '-O3', '-w'] + ['-I' + ipath for ipath in include_paths])
    # GPU + FLOAT
    config.add_extension("_wrapper_gpu_opencl_float",
                         sources=["swig/gpu_float.i"] + source_files,
                         swig_opts=swig_opts,
                         include_dirs=[numpy_include] + include_paths,
                         define_macros=[
                             ('SOURCE_PATH', os.path.join(SOURCES_RELATIVE_PATH, "neighbors/buffer_kdtree")),
                             ('USE_GPU', 1),
                             ('USE_DOUBLE', 0),
                             ('TIMING', TIMING),
                             ('WORKGROUP_SIZE_BRUTE', WORKGROUP_SIZE_BRUTE),
                             ('WORKGROUP_SIZE_LEAVES', WORKGROUP_SIZE_LEAVES),
                             ('WORKGROUP_SIZE_UPDATE', WORKGROUP_SIZE_UPDATE),
                             ('WORKGROUP_SIZE_COPY_INIT', WORKGROUP_SIZE_COPY_INIT),
                             ('WORKGROUP_SIZE_COMBINE', WORKGROUP_SIZE_COMBINE),
                             ('WORKGROUP_SIZE_TEST_SUBSET', WORKGROUP_SIZE_TEST_SUBSET),
                             ('WORKGROUP_SIZE_COPY_DISTS_INDICES', WORKGROUP_SIZE_COPY_DISTS_INDICES),
                         ],
                         libraries=['OpenCL', 'gomp'],
                         extra_compile_args=["-fopenmp", '-O3', '-w'] + ['-I' + ipath for ipath in include_paths])
    # GPU + DOUBLE
    config.add_extension("_wrapper_gpu_opencl_double",
                         sources=["swig/gpu_double.i"] + source_files,
                         swig_opts=swig_opts,
                         include_dirs=[numpy_include] + include_paths,
                         define_macros=[
                             ('SOURCE_PATH', os.path.join(SOURCES_RELATIVE_PATH, "neighbors/buffer_kdtree")),
                             ('USE_GPU', 1),
                             ('USE_DOUBLE', 1),
                             ('TIMING', TIMING),
                             ('WORKGROUP_SIZE_BRUTE', WORKGROUP_SIZE_BRUTE),
                             ('WORKGROUP_SIZE_LEAVES', WORKGROUP_SIZE_LEAVES),
                             ('WORKGROUP_SIZE_UPDATE', WORKGROUP_SIZE_UPDATE),
                             ('WORKGROUP_SIZE_COPY_INIT', WORKGROUP_SIZE_COPY_INIT),
                             ('WORKGROUP_SIZE_COMBINE', WORKGROUP_SIZE_COMBINE),
                             ('WORKGROUP_SIZE_TEST_SUBSET', WORKGROUP_SIZE_TEST_SUBSET),
                             ('WORKGROUP_SIZE_COPY_DISTS_INDICES', WORKGROUP_SIZE_COPY_DISTS_INDICES),
                         ],
                         libraries=['OpenCL', 'gomp'],
                         extra_compile_args=["-fopenmp", '-O3', '-w'] + ['-I' + ipath for ipath in include_paths])
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
gieseke/bufferkdtree
|
bufferkdtree/neighbors/buffer_kdtree/setup.py
|
Python
|
gpl-2.0
| 6,584
|
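The setup.py above follows the numpy.distutils subpackage convention: configuration() can either be consumed by a parent package or run standalone via the __main__ block. Below is a minimal sketch of the parent side; the package layout is an assumption based on the file's path, not taken from the repository:

# Hypothetical parent setup.py, one directory up; numpy.distutils walks
# subpackages and invokes each subdirectory's configuration() for us.
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('neighbors', parent_package, top_path)
    # Picks up neighbors/buffer_kdtree/setup.py and merges its extensions.
    config.add_subpackage('buffer_kdtree')
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())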