| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Miscellaneous utilities for testing.
"""
import re
import sys
import os
import tempfile
from pydoop.hdfs import default_is_local
def inject_code(new_code, target_code):
"""
Inject new_code into target_code, before the first import.
NOTE: this is just a hack to make examples work out-of-the-box; in
the general case it can fail in several ways.
"""
new_code = "{0}#--AUTO-INJECTED--{0}{1}{0}#-----------------{0}".format(
os.linesep, os.linesep.join(new_code.strip().splitlines())
)
pos = max(target_code.find("import"), 0)
if pos:
pos = target_code.rfind(os.linesep, 0, pos) + 1
return target_code[:pos] + new_code + target_code[pos:]
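# A hypothetical usage sketch (assuming POSIX line separators): the injected
# block is placed at the position of the first "import" in the target.
#
#   >>> src = "import sys\nprint(sys.argv)\n"
#   >>> inject_code("x = 1", src).startswith("\n#--AUTO-INJECTED--")
#   True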
def add_sys_path(target_code):
new_code = os.linesep.join([
"import sys",
"sys.path = %r" % (sys.path,)
])
return inject_code(new_code, target_code)
def parse_mr_output(output, vtype=str):
d = {}
for line in output.splitlines():
if not line.strip():
continue
try:
k, v = line.split()
v = vtype(v)
except (ValueError, TypeError):
raise ValueError("bad output format")
d[k] = v
return d
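# A minimal sketch of the expected input (hypothetical data): one
# whitespace-separated key/value pair per line, as emitted by a word count.
#
#   >>> parse_mr_output("foo 2\nbar 1\n", vtype=int)
#   {'foo': 2, 'bar': 1}  # (dict ordering may vary)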
def compare_counts(c1, c2):
if len(c1) != len(c2):
print len(c1), len(c2)
return "number of keys differs"
keys = sorted(c1)
if sorted(c2) != keys:
return "key lists are different"
for k in keys:
if c1[k] != c2[k]:
return "values are different for key %r (%r != %r)" % (
k, c1[k], c2[k]
)
class LocalWordCount(object):
def __init__(self, input_path, min_occurrence=0):
self.input_path = input_path
self.min_occurrence = min_occurrence
self.__expected_output = None
@property
def expected_output(self):
if self.__expected_output is None:
self.__expected_output = self.run()
return self.__expected_output
def run(self):
wc = {}
if os.path.isdir(self.input_path):
for fn in os.listdir(self.input_path):
if fn[0] == ".":
continue
self._wordcount_file(wc, fn, self.input_path)
else:
self._wordcount_file(wc, self.input_path)
if self.min_occurrence:
wc = dict(t for t in wc.iteritems() if t[1] >= self.min_occurrence)
return wc
def _wordcount_file(self, wc, fn, path=None):
with open(os.path.join(path, fn) if path else fn) as f:
for line in f:
words = re.sub('[^0-9a-zA-Z]+', ' ', line).split()
for w in words:
wc[w] = wc.get(w, 0) + 1
def check(self, output):
res = compare_counts(
parse_mr_output(output, vtype=int), self.expected_output
)
if res:
return "ERROR: %s" % res
else:
return "OK."
def get_wd_prefix(base="pydoop_"):
if default_is_local():
return os.path.join(tempfile.gettempdir(), base)
else:
return base
| ilveroluca/pydoop | pydoop/test_support.py | Python | apache-2.0 | 3,704 |
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys, lucene, unittest
from lucene import JArray
from PyLuceneTestCase import PyLuceneTestCase
from MultiSpansWrapper import MultiSpansWrapper
from java.io import StringReader
from org.apache.lucene.analysis import Analyzer
from org.apache.lucene.analysis.core import \
LowerCaseTokenizer, WhitespaceTokenizer
from org.apache.lucene.analysis.tokenattributes import \
CharTermAttribute, OffsetAttribute, PayloadAttribute, \
PositionIncrementAttribute
from org.apache.lucene.document import Document, Field, TextField
from org.apache.lucene.index import MultiFields, Term
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.search import MultiPhraseQuery, PhraseQuery
from org.apache.lucene.search.payloads import PayloadSpanUtil
from org.apache.lucene.search.spans import SpanNearQuery, SpanTermQuery
from org.apache.lucene.util import BytesRef, Version
from org.apache.pylucene.analysis import \
PythonAnalyzer, PythonFilteringTokenFilter, PythonTokenFilter, \
PythonTokenizer
class PositionIncrementTestCase(PyLuceneTestCase):
"""
Unit tests ported from Java Lucene
"""
def testSetPosition(self):
class _tokenizer(PythonTokenizer):
def __init__(_self, reader):
super(_tokenizer, _self).__init__(reader)
_self.TOKENS = ["1", "2", "3", "4", "5"]
_self.INCREMENTS = [1, 2, 1, 0, 1]
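# With these increments, the tokens "1".."5" land at positions
# 0, 2, 3, 3, 4 (each position is the running sum of the increments,
# starting from -1); the phrase-query assertions below rely on this.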
_self.i = 0
_self.posIncrAtt = _self.addAttribute(PositionIncrementAttribute.class_)
_self.termAtt = _self.addAttribute(CharTermAttribute.class_)
_self.offsetAtt = _self.addAttribute(OffsetAttribute.class_)
def incrementToken(_self):
if _self.i == len(_self.TOKENS):
return False
_self.clearAttributes()
_self.termAtt.append(_self.TOKENS[_self.i])
_self.offsetAtt.setOffset(_self.i, _self.i)
_self.posIncrAtt.setPositionIncrement(_self.INCREMENTS[_self.i])
_self.i += 1
return True
def end(_self):
pass
def reset(_self):
pass
def close(_self):
pass
class _analyzer(PythonAnalyzer):
def createComponents(_self, fieldName, reader):
return Analyzer.TokenStreamComponents(_tokenizer(reader))
writer = self.getWriter(analyzer=_analyzer())
d = Document()
d.add(Field("field", "bogus", TextField.TYPE_STORED))
writer.addDocument(d)
writer.commit()
writer.close()
searcher = self.getSearcher()
reader = searcher.getIndexReader()
pos = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "field", BytesRef("1"))
pos.nextDoc()
# first token should be at position 0
self.assertEqual(0, pos.nextPosition())
pos = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "field", BytesRef("2"))
pos.nextDoc()
# second token should be at position 2
self.assertEqual(2, pos.nextPosition())
q = PhraseQuery()
q.add(Term("field", "1"))
q.add(Term("field", "2"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# same as previous, just specify positions explicitly.
q = PhraseQuery()
q.add(Term("field", "1"), 0)
q.add(Term("field", "2"), 1)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# specifying correct positions should find the phrase.
q = PhraseQuery()
q.add(Term("field", "1"), 0)
q.add(Term("field", "2"), 2)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "2"))
q.add(Term("field", "3"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "3"))
q.add(Term("field", "4"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# phrase query would find it when correct positions are specified.
q = PhraseQuery()
q.add(Term("field", "3"), 0)
q.add(Term("field", "4"), 0)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
# phrase query should fail for a non-existing searched term
# even if other searched terms exist in the same searched
# position.
q = PhraseQuery()
q.add(Term("field", "3"), 0)
q.add(Term("field", "9"), 0)
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
# multi-phrase query should succeed for a non-existing searched term
# because other searched terms exist in the same searched
# position.
mq = MultiPhraseQuery()
mq.add([Term("field", "3"), Term("field", "9")], 0)
hits = searcher.search(mq, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "2"))
q.add(Term("field", "4"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "3"))
q.add(Term("field", "5"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "4"))
q.add(Term("field", "5"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(1, len(hits))
q = PhraseQuery()
q.add(Term("field", "2"))
q.add(Term("field", "5"))
hits = searcher.search(q, None, 1000).scoreDocs
self.assertEqual(0, len(hits))
def testPayloadsPos0(self):
writer = self.getWriter(analyzer=TestPayloadAnalyzer())
doc = Document()
doc.add(Field("content", "a a b c d e a f g h i j a b k k",
TextField.TYPE_STORED))
writer.addDocument(doc)
reader = writer.getReader()
writer.close()
tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"content", BytesRef("a"))
count = 0
self.assert_(tp.nextDoc() != tp.NO_MORE_DOCS)
# "a" occurs 4 times
self.assertEqual(4, tp.freq())
expected = 0
self.assertEqual(expected, tp.nextPosition())
self.assertEqual(1, tp.nextPosition())
self.assertEqual(3, tp.nextPosition())
self.assertEqual(6, tp.nextPosition())
# only one doc has "a"
self.assert_(tp.nextDoc() == tp.NO_MORE_DOCS)
searcher = self.getSearcher(reader=reader)
stq1 = SpanTermQuery(Term("content", "a"))
stq2 = SpanTermQuery(Term("content", "k"))
sqs = [stq1, stq2]
snq = SpanNearQuery(sqs, 30, False)
count = 0
sawZero = False
pspans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq)
while pspans.next():
payloads = pspans.getPayload()
sawZero |= pspans.start() == 0
it = payloads.iterator()
while it.hasNext():
count += 1
it.next()
self.assertEqual(5, count)
self.assert_(sawZero)
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq)
count = 0
sawZero = False
while spans.next():
count += 1
sawZero |= spans.start() == 0
self.assertEqual(4, count)
self.assert_(sawZero)
sawZero = False
psu = PayloadSpanUtil(searcher.getTopReaderContext())
pls = psu.getPayloadsForQuery(snq)
count = pls.size()
it = pls.iterator()
while it.hasNext():
bytes = JArray('byte').cast_(it.next())
s = bytes.string_
sawZero |= s == "pos: 0"
self.assertEqual(5, count)
self.assert_(sawZero)
class StopWhitespaceAnalyzer(PythonAnalyzer):
def __init__(self, enablePositionIncrements):
super(StopWhitespaceAnalyzer, self).__init__()
self.enablePositionIncrements = enablePositionIncrements
def createComponents(self, fieldName, reader):
class _stopFilter(PythonFilteringTokenFilter):
def __init__(_self, tokenStream):
super(_stopFilter, _self).__init__(Version.LUCENE_CURRENT, tokenStream)
_self.termAtt = _self.addAttribute(CharTermAttribute.class_);
def accept(_self):
return _self.termAtt.toString() != "stop"
source = WhitespaceTokenizer(Version.LUCENE_CURRENT, reader)
return Analyzer.TokenStreamComponents(source, _stopFilter(source))
class TestPayloadAnalyzer(PythonAnalyzer):
def createComponents(self, fieldName, reader):
source = LowerCaseTokenizer(Version.LUCENE_CURRENT, reader)
return Analyzer.TokenStreamComponents(source, PayloadFilter(source, fieldName))
class PayloadFilter(PythonTokenFilter):
def __init__(self, input, fieldName):
super(PayloadFilter, self).__init__(input)
self.input = input
self.fieldName = fieldName
self.pos = 0
self.i = 0
self.posIncrAttr = input.addAttribute(PositionIncrementAttribute.class_)
self.payloadAttr = input.addAttribute(PayloadAttribute.class_)
self.termAttr = input.addAttribute(CharTermAttribute.class_)
def incrementToken(self):
if self.input.incrementToken():
bytes = JArray('byte')("pos: %d" %(self.pos))
self.payloadAttr.setPayload(BytesRef(bytes))
if self.pos == 0 or self.i % 2 == 1:
posIncr = 1
else:
posIncr = 0
self.posIncrAttr.setPositionIncrement(posIncr)
self.pos += posIncr
self.i += 1
return True
return False
if __name__ == "__main__":
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
unittest.main()
except:
pass
else:
unittest.main()
| owenmorris/pylucene | test/test_PositionIncrement.py | Python | apache-2.0 | 11,319 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mec
| mecforlove/oj-web | app/utils/__init__.py | Python | apache-2.0 | 61 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import logging
import logging.config
import logging.handlers
import sys
import traceback
import six
from six import moves
from oslo_context import context as context_utils
from oslo_serialization import jsonutils
def _dictify_context(context):
if context is None:
return {}
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
# A configuration object is given to us when the application registers
# the logging options.
_CONF = None
def _store_global_conf(conf):
global _CONF
_CONF = conf
def _update_record_with_context(record):
"""Given a log record, update it with context information.
The request context, if there is one, will either be in the
extra values for the incoming record or in the global
thread-local store.
"""
context = record.__dict__.get(
'context',
context_utils.get_current()
)
d = _dictify_context(context)
# Copy the context values directly onto the record so they can be
# used by the formatting strings.
for k, v in d.items():
setattr(record, k, v)
return context
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
# Build the extra values that were given to us, including
# the context.
context = _update_record_with_context(record)
if hasattr(record, 'extra'):
extra = record.extra.copy()
else:
extra = {}
for key in getattr(record, 'extra_keys', []):
if key not in extra:
extra[key] = getattr(record, key)
# If we saved a context object, explode it into the extra
# dictionary because the values are more useful than the
# object reference.
if 'context' in extra:
extra.update(_dictify_context(context))
del extra['context']
message['extra'] = extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
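# A hypothetical record formatted by JSONFormatter serializes to a single
# JSON document per log line, roughly:
#   {"message": "...", "levelname": "INFO", "name": "...", ...,
#    "extra": {...}, "traceback": null}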
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
self.conf = kwargs.pop('config', _CONF)
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
context = _update_record_with_context(record)
if context:
# FIXME(dhellmann): We should replace these nova-isms with
# more generic handling in the Context class. See the
# app-agnostic-logging-parameters blueprint.
instance = getattr(context, 'instance', None)
instance_uuid = getattr(context, 'instance_uuid', None)
# resource_uuid was introduced in oslo_context's
# RequestContext
resource_uuid = getattr(context, 'resource_uuid', None)
instance_extra = ''
if instance:
instance_extra = (self.conf.instance_format
% {'uuid': instance})
elif instance_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': instance_uuid})
elif resource_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': resource_uuid})
record.instance = instance_extra
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity', 'resource'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = self.conf.logging_context_format_string
else:
fmt = self.conf.logging_default_format_string
if (record.levelno == logging.DEBUG and
self.conf.logging_debug_format_suffix):
fmt += " " + self.conf.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if self.conf.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = self.conf.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
| JioCloud/oslo.log | oslo_log/formatters.py | Python | apache-2.0 | 8,844 |
import imp
import logging
import os
import sys
from mock import *
from .gp_unittest import *
from gppylib.gpcatalog import GPCatalogTable
class GpCheckCatTestCase(GpTestCase):
def setUp(self):
# because gpcheckcat does not have a .py extension, we have to use imp to import it
# if we had a gpcheckcat.py, this is equivalent to:
# import gpcheckcat
# self.subject = gpcheckcat
gpcheckcat_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gpcheckcat")
self.subject = imp.load_source('gpcheckcat', gpcheckcat_file)
self.subject.check_gpexpand = lambda : (True, "")
self.db_connection = Mock(spec=['close', 'query'])
self.unique_index_violation_check = Mock(spec=['runCheck'])
self.foreign_key_check = Mock(spec=['runCheck', 'checkTableForeignKey'])
self.apply_patches([
patch("gpcheckcat.pg.connect", return_value=self.db_connection),
patch("gpcheckcat.UniqueIndexViolationCheck", return_value=self.unique_index_violation_check),
patch("gpcheckcat.ForeignKeyCheck", return_value=self.foreign_key_check),
patch('os.environ', new={}),
])
self.subject.logger = Mock(spec=['log', 'info', 'debug', 'error', 'fatal'])
self.unique_index_violation_check.runCheck.return_value = []
self.leaked_schema_dropper = Mock(spec=['drop_leaked_schemas'])
self.leaked_schema_dropper.drop_leaked_schemas.return_value = []
issues_list = dict()
issues_list['cat1'] = [('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
issues_list['cat2'] = [('pg_type', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
self.foreign_key_check.runCheck.return_value = issues_list
self.subject.GV.coordinator_dbid = 0
self.subject.GV.cfg = {0:dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0),
1:dict(hostname='host1', port=123, id=1, address='123', datadir='dir', content=1, dbid=1)}
self.subject.GV.checkStatus = True
self.subject.GV.foreignKeyStatus = True
self.subject.GV.missingEntryStatus = True
self.subject.setError = Mock()
self.subject.print_repair_issues = Mock()
def test_running_unknown_check__raises_exception(self):
with self.assertRaises(LookupError):
self.subject.runOneCheck('some_unknown_check')
# @skip("order of checks")
# def test_run_all_checks__runs_all_checks_in_correct_order(self):
# self.subject.runAllChecks()
#
# self.unique_index_violation_check.runCheck.assert_any_call(self.db_connection)
# # add other checks here
# # figure out how to enforce the order of calls;
# # at a minimum, check the order number of the static list gpcheckcat.all_checks
def test_running_unique_index_violation_check__makes_the_check(self):
self.subject.runOneCheck('unique_index_violation')
self.unique_index_violation_check.runCheck.assert_called_with(self.db_connection)
def test_running_unique_index_violation_check__when_no_violations_are_found__passes_the_check(self):
self.subject.runOneCheck('unique_index_violation')
self.assertTrue(self.subject.GV.checkStatus)
self.subject.setError.assert_not_called()
def test_running_unique_index_violation_check__when_violations_are_found__fails_the_check(self):
self.unique_index_violation_check.runCheck.return_value = [
dict(table_oid=123, table_name='stephen_table', index_name='finger', column_names='c1, c2', violated_segments=[-1,8]),
dict(table_oid=456, table_name='larry_table', index_name='stock', column_names='c1', violated_segments=[-1]),
]
self.subject.runOneCheck('unique_index_violation')
self.assertFalse(self.subject.GV.checkStatus)
self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
def test_checkcat_report__after_running_unique_index_violations_check__reports_violations(self):
self.unique_index_violation_check.runCheck.return_value = [
dict(table_oid=123, table_name='stephen_table', index_name='finger', column_names='c1, c2', violated_segments=[-1,8]),
dict(table_oid=456, table_name='larry_table', index_name='stock', column_names='c1', violated_segments=[-1]),
]
self.subject.runOneCheck('unique_index_violation')
self.subject.checkcatReport()
expected_message1 = ' Table stephen_table has a violated unique index: finger'
expected_message2 = ' Table larry_table has a violated unique index: stock'
log_messages = [args[0][1] for args in self.subject.logger.log.call_args_list]
self.assertIn(expected_message1, log_messages)
self.assertIn(expected_message2, log_messages)
def test_drop_leaked_schemas__when_no_leaked_schemas_exist__passes_gpcheckcat(self):
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
self.subject.setError.assert_not_called()
def test_drop_leaked_schemas____when_leaked_schemas_exist__finds_and_drops_leaked_schemas(self):
self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
self.leaked_schema_dropper.drop_leaked_schemas.assert_called_once_with(self.db_connection)
def test_drop_leaked_schemas__when_leaked_schemas_exist__passes_gpcheckcat(self):
self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
self.subject.setError.assert_not_called()
def test_drop_leaked_schemas__when_leaked_schemas_exist__reports_which_schemas_are_dropped(self):
self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, "some_db_name")
expected_message = "Found and dropped 2 unbound temporary schemas"
log_messages = [args[0][1] for args in self.subject.logger.log.call_args_list]
self.assertIn(expected_message, log_messages)
def test_automatic_thread_count(self):
self.db_connection.query.return_value.getresult.return_value = [[0]]
self._run_batch_size_experiment(100)
self._run_batch_size_experiment(101)
@patch('gpcheckcat.GPCatalog', return_value=Mock())
@patch('sys.exit')
@patch('gppylib.gplog.log_literal')
def test_truncate_batch_size(self, mock_log, mock_gpcheckcat, mock_sys_exit):
self.subject.GV.opt['-B'] = 300 # override the setting from available memory
# setup conditions for 50 primaries and plenty of RAM such that max threads > 50
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
self.db_connection.query.return_value.getresult.return_value = [['4.3']]
self.db_connection.query.return_value.dictresult.return_value = primaries
testargs = ['some_string','-port 1', '-R foo']
# GOOD_MOCK_EXAMPLE for testing functionality in "__main__": put all code inside a method "main()",
# which can then be mocked as necessary.
with patch.object(sys, 'argv', testargs):
self.subject.main()
self.assertEqual(self.subject.GV.opt['-B'], len(primaries))
#mock_log.assert_any_call(50, "Truncated batch size to number of primaries: 50")
# I am confused that .assert_any_call() did not seem to work as expected --Larry
last_call = mock_log.call_args_list[0][0][2]
self.assertEqual(last_call, "Truncated batch size to number of primaries: 50")
@patch('gpcheckcat_modules.repair.Repair', return_value=Mock())
@patch('gpcheckcat_modules.repair.Repair.create_repair_for_extra_missing', return_value="/tmp")
def test_do_repair_for_extra__issues_repair(self, mock1, mock2):
issues = {("pg_class", "oid"):"extra"}
self.subject.GV.opt['-E'] = True
self.subject.do_repair_for_extra(issues)
self.subject.setError.assert_any_call(self.subject.ERROR_REMOVE)
self.subject.print_repair_issues.assert_any_call("/tmp")
@patch('gpcheckcat.removeFastSequence')
@patch('gpcheckcat.processForeignKeyResult')
def test_checkForeignKey__with_arg_gp_fastsequence(self, process_foreign_key_mock,fast_seq_mock):
cat_mock = Mock()
self.subject.GV.catalog = cat_mock
gp_fastsequence_issue = dict()
gp_fastsequence_issue['gp_fastsequence'] = [('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
gp_fastsequence_issue['cat2'] = [('pg_type', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
self.foreign_key_check.runCheck.return_value = gp_fastsequence_issue
cat_tables = ["input1", "input2"]
self.subject.checkForeignKey(cat_tables)
self.assertEqual(cat_mock.getCatalogTables.call_count, 0)
self.assertFalse(self.subject.GV.checkStatus)
self.assertTrue(self.subject.GV.foreignKeyStatus)
self.subject.setError.assert_any_call(self.subject.ERROR_REMOVE)
self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
fast_seq_mock.assert_called_once_with(self.db_connection)
@patch('gpcheckcat.processForeignKeyResult')
def test_checkForeignKey__with_arg(self, process_foreign_key_mock):
cat_mock = Mock()
self.subject.GV.catalog = cat_mock
cat_tables = ["input1", "input2"]
self.subject.checkForeignKey(cat_tables)
self.assertEqual(cat_mock.getCatalogTables.call_count, 0)
self.assertFalse(self.subject.GV.checkStatus)
self.assertTrue(self.subject.GV.foreignKeyStatus)
self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
@patch('gpcheckcat.processForeignKeyResult')
def test_checkForeignKey__no_arg(self, process_foreign_key_mock):
cat_mock = Mock(spec=['getCatalogTables'])
cat_tables = ["input1", "input2"]
cat_mock.getCatalogTables.return_value = cat_tables
self.subject.GV.catalog = cat_mock
self.subject.checkForeignKey()
self.assertEqual(cat_mock.getCatalogTables.call_count, 1)
self.assertFalse(self.subject.GV.checkStatus)
self.assertTrue(self.subject.GV.foreignKeyStatus)
self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
# Test gpcheckcat -C option with checkForeignKey
@patch('gpcheckcat.GPCatalog', return_value=Mock())
@patch('sys.exit')
@patch('gpcheckcat.checkTableMissingEntry')
def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3):
self.subject.checkForeignKey = Mock()
gpcat_class_mock = Mock(spec=['getCatalogTable'])
cat_obj_mock = Mock()
self.subject.getCatObj = gpcat_class_mock
self.subject.getCatObj.return_value = cat_obj_mock
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
self.db_connection.query.return_value.getresult.return_value = [['4.3']]
self.db_connection.query.return_value.dictresult.return_value = primaries
self.subject.GV.opt['-C'] = 'pg_class'
testargs = ['gpcheckcat', '-port 1', '-C pg_class']
with patch.object(sys, 'argv', testargs):
self.subject.main()
self.subject.getCatObj.assert_called_once_with(' pg_class')
self.subject.checkForeignKey.assert_called_once_with([cat_obj_mock])
@patch('gpcheckcat.checkTableMissingEntry', return_value = None)
def test_checkMissingEntry__no_issues(self, mock1):
cat_mock = Mock()
cat_tables = ["input1", "input2"]
cat_mock.getCatalogTables.return_value = cat_tables
self.subject.GV.catalog = cat_mock
self.subject.runOneCheck("missing_extraneous")
self.assertTrue(self.subject.GV.missingEntryStatus)
self.subject.setError.assert_not_called()
@patch('gpcheckcat.checkTableMissingEntry', return_value= {("pg_clas", "oid"): "extra"})
@patch('gpcheckcat.getPrimaryKeyColumn', return_value = (None,"oid"))
def test_checkMissingEntry__uses_oid(self, mock1, mock2):
self.subject.GV.opt['-E'] = True
aTable = Mock(spec=GPCatalogTable)
cat_mock = Mock()
cat_mock.getCatalogTables.return_value = [aTable]
self.subject.GV.catalog = cat_mock
self.subject.runOneCheck("missing_extraneous")
self.assertEqual(aTable.getPrimaryKey.call_count, 1)
self.subject.setError.assert_called_once_with(self.subject.ERROR_REMOVE)
@patch('gpcheckcat.checkTableMissingEntry', return_value= {("pg_operator", "typename, typenamespace"): "extra"})
@patch('gpcheckcat.getPrimaryKeyColumn', return_value = (None,None))
def test_checkMissingEntry__uses_pkeys(self, mock1, mock2):
self.subject.GV.opt['-E'] = True
aTable = MagicMock(spec=GPCatalogTable)
aTable.tableHasConsistentOids.return_value = False
cat_mock = Mock()
cat_mock.getCatalogTables.return_value = [aTable]
self.subject.GV.catalog = cat_mock
self.subject.runOneCheck("missing_extraneous")
self.assertEqual(aTable.getPrimaryKey.call_count, 1)
self.subject.setError.assert_called_once_with(self.subject.ERROR_REMOVE)
def test_getReportConfiguration_uses_contentid(self):
report_cfg = self.subject.getReportConfiguration()
self.assertEqual("content -1", report_cfg[-1]['segname'])
def test_RelationObject_reportAllIssues_handles_None_fields(self):
uut = self.subject.RelationObject(None, 'pg_class')
uut.setRelInfo(relname=None, nspname=None, relkind='t', paroid=0)
uut.reportAllIssues()
log_messages = [args[0][1].strip() for args in self.subject.logger.log.call_args_list]
self.assertIn('Relation oid: N/A', log_messages)
self.assertIn('Relation schema: N/A', log_messages)
self.assertIn('Relation name: N/A', log_messages)
####################### PRIVATE METHODS #######################
def _run_batch_size_experiment(self, num_primaries):
BATCH_SIZE = 4
self.subject.GV.opt['-B'] = BATCH_SIZE
self.num_batches = 0
self.num_joins = 0
self.num_starts = 0
self.is_remainder_case = False
for i in range(2, num_primaries):
self.subject.GV.cfg[i] = dict(hostname='host1', port=123, id=1, address='123',
datadir='dir', content=1, dbid=i)
def count_starts():
self.num_starts += 1
def count_joins():
if self.num_starts != BATCH_SIZE:
self.is_remainder_case = True
self.num_joins += 1
if self.num_joins == BATCH_SIZE:
self.num_batches += 1
self.num_joins = 0
self.num_starts = 0
if __name__ == '__main__':
run_tests()
| 50wu/gpdb | gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py | Python | apache-2.0 | 16,180 |
import inspect
import operator
import types as pytypes
import typing as pt
from collections import OrderedDict
from collections.abc import Sequence
from llvmlite import ir as llvmir
from numba import njit
from numba.core import cgutils, errors, imputils, types, utils
from numba.core.datamodel import default_manager, models
from numba.core.registry import cpu_target
from numba.core.typing import templates
from numba.core.typing.asnumbatype import as_numba_type
from numba.core.serialize import disable_pickling
from numba.experimental.jitclass import _box
##############################################################################
# Data model
class InstanceModel(models.StructModel):
def __init__(self, dmm, fe_typ):
cls_data_ty = types.ClassDataType(fe_typ)
# MemInfoPointer uses the `dtype` attribute to traverse for nested
# NRT MemInfo. Since we handle nested NRT MemInfo ourselves,
# we will provide MemInfoPointer with an opaque type
# so that it does not raise an exception for nested meminfo.
dtype = types.Opaque('Opaque.' + str(cls_data_ty))
members = [
('meminfo', types.MemInfoPointer(dtype)),
('data', types.CPointer(cls_data_ty)),
]
super(InstanceModel, self).__init__(dmm, fe_typ, members)
class InstanceDataModel(models.StructModel):
def __init__(self, dmm, fe_typ):
clsty = fe_typ.class_type
members = [(_mangle_attr(k), v) for k, v in clsty.struct.items()]
super(InstanceDataModel, self).__init__(dmm, fe_typ, members)
default_manager.register(types.ClassInstanceType, InstanceModel)
default_manager.register(types.ClassDataType, InstanceDataModel)
default_manager.register(types.ClassType, models.OpaqueModel)
def _mangle_attr(name):
"""
Mangle attributes.
The resulting name does not start with an underscore '_'.
"""
return 'm_' + name
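# e.g. _mangle_attr("x") -> "m_x" and _mangle_attr("_x") -> "m__x", so no
# mangled attribute name can start with an underscore.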
##############################################################################
# Class object
_ctor_template = """
def ctor({args}):
return __numba_cls_({args})
"""
def _getargs(fn_sig):
"""
Returns list of positional and keyword argument names in order.
"""
params = fn_sig.parameters
args = []
for k, v in params.items():
if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:
args.append(k)
else:
msg = "%s argument type unsupported in jitclass" % v.kind
raise errors.UnsupportedError(msg)
return args
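# e.g. for "def __init__(self, x, y=0)" this returns ['self', 'x', 'y'];
# a *args or **kwargs parameter raises UnsupportedError.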
@disable_pickling
class JitClassType(type):
"""
The type of any jitclass.
"""
def __new__(cls, name, bases, dct):
if len(bases) != 1:
raise TypeError("must have exactly one base class")
[base] = bases
if isinstance(base, JitClassType):
raise TypeError("cannot subclass from a jitclass")
assert 'class_type' in dct, 'missing "class_type" attr'
outcls = type.__new__(cls, name, bases, dct)
outcls._set_init()
return outcls
def _set_init(cls):
"""
Generate a wrapper for calling the constructor from pure Python.
Note the wrapper will only accept positional arguments.
"""
init = cls.class_type.instance_type.methods['__init__']
init_sig = utils.pysignature(init)
# get positional and keyword arguments
# offset by one to exclude the `self` arg
args = _getargs(init_sig)[1:]
cls._ctor_sig = init_sig
ctor_source = _ctor_template.format(args=', '.join(args))
glbls = {"__numba_cls_": cls}
exec(ctor_source, glbls)
ctor = glbls['ctor']
cls._ctor = njit(ctor)
def __instancecheck__(cls, instance):
if isinstance(instance, _box.Box):
return instance._numba_type_.class_type is cls.class_type
return False
def __call__(cls, *args, **kwargs):
# The first argument of _ctor_sig is `cls`, which here
# is bound to None and then skipped when invoking the constructor.
bind = cls._ctor_sig.bind(None, *args, **kwargs)
bind.apply_defaults()
return cls._ctor(*bind.args[1:], **bind.kwargs)
##############################################################################
# Registration utils
def _validate_spec(spec):
for k, v in spec.items():
if not isinstance(k, str):
raise TypeError("spec keys should be strings, got %r" % (k,))
if not isinstance(v, types.Type):
raise TypeError("spec values should be Numba type instances, got %r"
% (v,))
def _fix_up_private_attr(clsname, spec):
"""
Apply the same changes to dunder names as CPython would.
"""
out = OrderedDict()
for k, v in spec.items():
if k.startswith('__') and not k.endswith('__'):
k = '_' + clsname + k
out[k] = v
return out
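# e.g. for clsname "Foo", a spec key "__x" becomes "_Foo__x" while "__x__"
# and "x" are left untouched, mirroring CPython's private-name mangling.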
def _add_linking_libs(context, call):
"""
Add the required libs for the callable to allow inlining.
"""
libs = getattr(call, "libs", ())
if libs:
context.add_linking_libs(libs)
def register_class_type(cls, spec, class_ctor, builder):
"""
Internal function to create a jitclass.
Args
----
cls: the original class object (used as the prototype)
spec: the structural specification contains the field types.
class_ctor: the numba type to represent the jitclass
builder: the internal jitclass builder
"""
# Normalize spec
if spec is None:
spec = OrderedDict()
elif isinstance(spec, Sequence):
spec = OrderedDict(spec)
# Extend spec with class annotations.
for attr, py_type in pt.get_type_hints(cls).items():
if attr not in spec:
spec[attr] = as_numba_type(py_type)
_validate_spec(spec)
# Fix up private attribute names
spec = _fix_up_private_attr(cls.__name__, spec)
# Copy methods from base classes
clsdct = {}
for basecls in reversed(inspect.getmro(cls)):
clsdct.update(basecls.__dict__)
methods, props, static_methods, others = {}, {}, {}, {}
for k, v in clsdct.items():
if isinstance(v, pytypes.FunctionType):
methods[k] = v
elif isinstance(v, property):
props[k] = v
elif isinstance(v, staticmethod):
static_methods[k] = v
else:
others[k] = v
# Check for name shadowing
shadowed = (set(methods) | set(props) | set(static_methods)) & set(spec)
if shadowed:
raise NameError("name shadowing: {0}".format(', '.join(shadowed)))
docstring = others.pop('__doc__', "")
_drop_ignored_attrs(others)
if others:
msg = "class members are not yet supported: {0}"
members = ', '.join(others.keys())
raise TypeError(msg.format(members))
for k, v in props.items():
if v.fdel is not None:
raise TypeError("deleter is not supported: {0}".format(k))
jit_methods = {k: njit(v) for k, v in methods.items()}
jit_props = {}
for k, v in props.items():
dct = {}
if v.fget:
dct['get'] = njit(v.fget)
if v.fset:
dct['set'] = njit(v.fset)
jit_props[k] = dct
jit_static_methods = {
k: njit(v.__func__) for k, v in static_methods.items()}
# Instantiate class type
class_type = class_ctor(
cls,
ConstructorTemplate,
spec,
jit_methods,
jit_props,
jit_static_methods)
jit_class_dct = dict(class_type=class_type, __doc__=docstring)
jit_class_dct.update(jit_static_methods)
cls = JitClassType(cls.__name__, (cls,), jit_class_dct)
# Register resolution of the class object
typingctx = cpu_target.typing_context
typingctx.insert_global(cls, class_type)
# Register class
targetctx = cpu_target.target_context
builder(class_type, typingctx, targetctx).register()
as_numba_type.register(cls, class_type.instance_type)
return cls
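# A minimal usage sketch (not part of this module; assumes the public numba
# API): the @jitclass decorator funnels into register_class_type() above.
#
#   >>> from numba import float64
#   >>> from numba.experimental import jitclass
#   >>> @jitclass([("value", float64)])
#   ... class Counter(object):
#   ...     def __init__(self, value):
#   ...         self.value = value
#   >>> Counter(1.5).value
#   1.5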
class ConstructorTemplate(templates.AbstractTemplate):
"""
Base class for jitclass constructor templates.
"""
def generic(self, args, kws):
# Redirect resolution to __init__
instance_type = self.key.instance_type
ctor = instance_type.jit_methods['__init__']
boundargs = (instance_type.get_reference_type(),) + args
disp_type = types.Dispatcher(ctor)
sig = disp_type.get_call_type(self.context, boundargs, kws)
if not isinstance(sig.return_type, types.NoneType):
raise TypeError(
f"__init__() should return None, not '{sig.return_type}'")
# Actual constructor returns an instance value (not None)
out = templates.signature(instance_type, *sig.args[1:])
return out
def _drop_ignored_attrs(dct):
# ignore anything defined by object
drop = set(['__weakref__',
'__module__',
'__dict__'])
if '__annotations__' in dct:
drop.add('__annotations__')
for k, v in dct.items():
if isinstance(v, (pytypes.BuiltinFunctionType,
pytypes.BuiltinMethodType)):
drop.add(k)
elif getattr(v, '__objclass__', None) is object:
drop.add(k)
for k in drop:
del dct[k]
class ClassBuilder(object):
"""
A jitclass builder for a mutable jitclass. This will register
typing and implementation hooks to the given typing and target contexts.
"""
class_impl_registry = imputils.Registry('jitclass builder')
implemented_methods = set()
def __init__(self, class_type, typingctx, targetctx):
self.class_type = class_type
self.typingctx = typingctx
self.targetctx = targetctx
def register(self):
"""
Register to the frontend and backend.
"""
# Register generic implementations for all jitclasses
self._register_methods(self.class_impl_registry,
self.class_type.instance_type)
# NOTE other registrations are done at the top-level
# (see ctor_impl and attr_impl below)
self.targetctx.install_registry(self.class_impl_registry)
def _register_methods(self, registry, instance_type):
"""
Register method implementations.
This simply registers that the method names are valid methods. Inside
of imp() below we retrieve the actual method to run from the type of
the receiver argument (i.e. self).
"""
to_register = list(instance_type.jit_methods) + \
list(instance_type.jit_static_methods)
for meth in to_register:
# There's no way to retrieve the particular method name
# inside the implementation function, so we have to register a
# specific closure for each different name
if meth not in self.implemented_methods:
self._implement_method(registry, meth)
self.implemented_methods.add(meth)
def _implement_method(self, registry, attr):
# create a separate instance of imp method to avoid closure clashing
def get_imp():
def imp(context, builder, sig, args):
instance_type = sig.args[0]
if attr in instance_type.jit_methods:
method = instance_type.jit_methods[attr]
elif attr in instance_type.jit_static_methods:
method = instance_type.jit_static_methods[attr]
# imp gets called as a method, where the first argument is
# self. We drop this for a static method.
sig = sig.replace(args=sig.args[1:])
args = args[1:]
disp_type = types.Dispatcher(method)
call = context.get_function(disp_type, sig)
out = call(builder, args)
_add_linking_libs(context, call)
return imputils.impl_ret_new_ref(context, builder,
sig.return_type, out)
return imp
def _getsetitem_gen(getset):
_dunder_meth = "__%s__" % getset
op = getattr(operator, getset)
@templates.infer_global(op)
class GetSetItem(templates.AbstractTemplate):
def generic(self, args, kws):
instance = args[0]
if isinstance(instance, types.ClassInstanceType) and \
_dunder_meth in instance.jit_methods:
meth = instance.jit_methods[_dunder_meth]
disp_type = types.Dispatcher(meth)
sig = disp_type.get_call_type(self.context, args, kws)
return sig
# lower both {g,s}etitem and __{g,s}etitem__ to catch the calls
# from python and numba
imputils.lower_builtin((types.ClassInstanceType, _dunder_meth),
types.ClassInstanceType,
types.VarArg(types.Any))(get_imp())
imputils.lower_builtin(op,
types.ClassInstanceType,
types.VarArg(types.Any))(get_imp())
dunder_stripped = attr.strip('_')
if dunder_stripped in ("getitem", "setitem"):
_getsetitem_gen(dunder_stripped)
else:
registry.lower((types.ClassInstanceType, attr),
types.ClassInstanceType,
types.VarArg(types.Any))(get_imp())
@templates.infer_getattr
class ClassAttribute(templates.AttributeTemplate):
key = types.ClassInstanceType
def generic_resolve(self, instance, attr):
if attr in instance.struct:
# It's a struct field => the type is well-known
return instance.struct[attr]
elif attr in instance.jit_methods:
# It's a jitted method => typeinfer it
meth = instance.jit_methods[attr]
disp_type = types.Dispatcher(meth)
class MethodTemplate(templates.AbstractTemplate):
key = (self.key, attr)
def generic(self, args, kws):
args = (instance,) + tuple(args)
sig = disp_type.get_call_type(self.context, args, kws)
return sig.as_method()
return types.BoundFunction(MethodTemplate, instance)
elif attr in instance.jit_static_methods:
# It's a jitted method => typeinfer it
meth = instance.jit_static_methods[attr]
disp_type = types.Dispatcher(meth)
class StaticMethodTemplate(templates.AbstractTemplate):
key = (self.key, attr)
def generic(self, args, kws):
# Don't add instance as the first argument for a static
# method.
sig = disp_type.get_call_type(self.context, args, kws)
# sig itself does not include ClassInstanceType as it's
# first argument, so instead of calling sig.as_method()
# we insert the recvr. This is equivalent to
# sig.replace(args=(instance,) + sig.args).as_method().
return sig.replace(recvr=instance)
return types.BoundFunction(StaticMethodTemplate, instance)
elif attr in instance.jit_props:
# It's a jitted property => typeinfer its getter
impdct = instance.jit_props[attr]
getter = impdct['get']
disp_type = types.Dispatcher(getter)
sig = disp_type.get_call_type(self.context, (instance,), {})
return sig.return_type
@ClassBuilder.class_impl_registry.lower_getattr_generic(types.ClassInstanceType)
def get_attr_impl(context, builder, typ, value, attr):
"""
Generic getattr() for @jitclass instances.
"""
if attr in typ.struct:
# It's a struct field
inst = context.make_helper(builder, typ, value=value)
data_pointer = inst.data
data = context.make_data_helper(builder, typ.get_data_type(),
ref=data_pointer)
return imputils.impl_ret_borrowed(context, builder,
typ.struct[attr],
getattr(data, _mangle_attr(attr)))
elif attr in typ.jit_props:
# It's a jitted property
getter = typ.jit_props[attr]['get']
sig = templates.signature(None, typ)
dispatcher = types.Dispatcher(getter)
sig = dispatcher.get_call_type(context.typing_context, [typ], {})
call = context.get_function(dispatcher, sig)
out = call(builder, [value])
_add_linking_libs(context, call)
return imputils.impl_ret_new_ref(context, builder, sig.return_type, out)
raise NotImplementedError('attribute {0!r} not implemented'.format(attr))
@ClassBuilder.class_impl_registry.lower_setattr_generic(types.ClassInstanceType)
def set_attr_impl(context, builder, sig, args, attr):
"""
Generic setattr() for @jitclass instances.
"""
typ, valty = sig.args
target, val = args
if attr in typ.struct:
# It's a struct member
inst = context.make_helper(builder, typ, value=target)
data_ptr = inst.data
data = context.make_data_helper(builder, typ.get_data_type(),
ref=data_ptr)
# Get old value
attr_type = typ.struct[attr]
oldvalue = getattr(data, _mangle_attr(attr))
# Store new value
setattr(data, _mangle_attr(attr), val)
context.nrt.incref(builder, attr_type, val)
# Delete old value
context.nrt.decref(builder, attr_type, oldvalue)
elif attr in typ.jit_props:
# It's a jitted property
setter = typ.jit_props[attr]['set']
disp_type = types.Dispatcher(setter)
sig = disp_type.get_call_type(context.typing_context,
(typ, valty), {})
call = context.get_function(disp_type, sig)
call(builder, (target, val))
_add_linking_libs(context, call)
else:
raise NotImplementedError(
'attribute {0!r} not implemented'.format(attr))
def imp_dtor(context, module, instance_type):
llvoidptr = context.get_value_type(types.voidptr)
llsize = context.get_value_type(types.uintp)
dtor_ftype = llvmir.FunctionType(llvmir.VoidType(),
[llvoidptr, llsize, llvoidptr])
fname = "_Dtor.{0}".format(instance_type.name)
dtor_fn = cgutils.get_or_insert_function(module, dtor_ftype, fname)
if dtor_fn.is_declaration:
# Define
builder = llvmir.IRBuilder(dtor_fn.append_basic_block())
alloc_fe_type = instance_type.get_data_type()
alloc_type = context.get_value_type(alloc_fe_type)
ptr = builder.bitcast(dtor_fn.args[0], alloc_type.as_pointer())
data = context.make_helper(builder, alloc_fe_type, ref=ptr)
context.nrt.decref(builder, alloc_fe_type, data._getvalue())
builder.ret_void()
return dtor_fn
@ClassBuilder.class_impl_registry.lower(types.ClassType,
types.VarArg(types.Any))
def ctor_impl(context, builder, sig, args):
"""
Generic constructor (__new__) for jitclasses.
"""
# Allocate the instance
inst_typ = sig.return_type
alloc_type = context.get_data_type(inst_typ.get_data_type())
alloc_size = context.get_abi_sizeof(alloc_type)
meminfo = context.nrt.meminfo_alloc_dtor(
builder,
context.get_constant(types.uintp, alloc_size),
imp_dtor(context, builder.module, inst_typ),
)
data_pointer = context.nrt.meminfo_data(builder, meminfo)
data_pointer = builder.bitcast(data_pointer,
alloc_type.as_pointer())
# Nullify all data
builder.store(cgutils.get_null_value(alloc_type),
data_pointer)
inst_struct = context.make_helper(builder, inst_typ)
inst_struct.meminfo = meminfo
inst_struct.data = data_pointer
# Call the jitted __init__
# TODO: extract the following into a common util
init_sig = (sig.return_type,) + sig.args
init = inst_typ.jit_methods['__init__']
disp_type = types.Dispatcher(init)
call = context.get_function(disp_type, types.void(*init_sig))
_add_linking_libs(context, call)
realargs = [inst_struct._getvalue()] + list(args)
call(builder, realargs)
# Prepare return value
ret = inst_struct._getvalue()
return imputils.impl_ret_new_ref(context, builder, inst_typ, ret)
| cpcloud/numba | numba/experimental/jitclass/base.py | Python | bsd-2-clause | 20,859 |
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around swig.
Sets the SWIG_LIB environment var to point to Lib dir
and defers control to the platform-specific swig binary.
Depends on swig binaries being available at ../../third_party/swig.
"""
import os
import subprocess
import sys
def main():
swig_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
os.pardir, os.pardir, 'third_party', 'swig'))
lib_dir = os.path.join(swig_dir, "Lib")
os.putenv("SWIG_LIB", lib_dir)
dir_map = {
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
}
# Swig documentation lies that platform macros are provided to swig
# preprocessor. Provide them ourselves.
platform_flags = {
'darwin': '-DSWIGMAC',
'linux2': '-DSWIGLINUX',
'win32': '-DSWIGWIN',
}
swig_bin = os.path.join(swig_dir, dir_map[sys.platform], 'swig')
args = [swig_bin, platform_flags[sys.platform]] + sys.argv[1:]
args = [x.replace('/', os.sep) for x in args]
print "Executing", args
sys.exit(subprocess.call(args))
if __name__ == "__main__":
main()
| Crystalnix/house-of-life-chromium | tools/swig/swig.py | Python | bsd-3-clause | 1,265 |
from math import *
octree_node_size = 112
def recurse(depth):
if depth == 0:
return 1
else:
return pow(8, depth) + recurse(depth - 1)
def octree_size(depth):
return recurse(depth) * octree_node_size
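# Sanity check: recurse(d) equals the closed form (8**(d+1) - 1) // 7 for a
# full octree, so octree_size(3) = 585 * 112 = 65520 bytes.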
print("Size %d" % (octree_size(3)))
| galek/anki-3d-engine | docs/drafts/octree.py | Python | bsd-3-clause | 246 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make-release
~~~~~~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def parse_changelog():
with open('CHANGES') as f:
lineiter = iter(f)
for line in lineiter:
match = re.search('^Version\s+(.*)', line.strip())
if match is None:
continue
length = len(match.group(1))
version = match.group(1).strip()
if lineiter.next().count('-') != len(match.group(0)):
continue
while 1:
change_info = lineiter.next().strip()
if change_info:
break
match = re.search(r'released on (\w+\s+\d+\w+\s+\d+)'
r'(?:, codename (.*))?(?i)', change_info)
if match is None:
continue
datestr, codename = match.groups()
return version, parse_date(datestr), codename
def bump_version(version):
try:
parts = map(int, version.split('.'))
except ValueError:
fail('Current version is not numeric')
parts[-1] += 1
return '.'.join(map(str, parts))
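# e.g. bump_version("0.9.7") -> "0.9.8"; a non-numeric version aborts via
# fail().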
def parse_date(string):
string = _date_clean_re.sub(r'\1', string)
return datetime.strptime(string, '%B %d %Y')
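# e.g. parse_date("July 23rd 2011") -> datetime(2011, 7, 23): the ordinal
# suffix is stripped by _date_clean_re before strptime is applied.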
def set_filename_version(filename, version_number, pattern):
changed = []
def inject_version(match):
before, old, after = match.groups()
changed.append(True)
return before + version_number + after
with open(filename) as f:
contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')(?sm)" % pattern,
inject_version, f.read())
if not changed:
fail('Could not find %s in %s', pattern, filename)
with open(filename, 'w') as f:
f.write(contents)
def set_init_version(version):
info('Setting __init__.py version to %s', version)
set_filename_version('logbook/__init__.py', version, '__version__')
def set_setup_version(version):
info('Setting setup.py version to %s', version)
set_filename_version('setup.py', version, 'version')
def build_and_upload():
Popen([sys.executable, 'setup.py', 'release', 'sdist', 'upload']).wait()
def fail(message, *args):
print >> sys.stderr, 'Error:', message % args
sys.exit(1)
def info(message, *args):
print >> sys.stderr, message % args
def get_git_tags():
return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def git_is_clean():
return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
message = message % args
Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
info('Tagging "%s"', tag)
Popen(['git', 'tag', tag]).wait()
def main():
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
rv = parse_changelog()
if rv is None:
fail('Could not parse changelog')
version, release_date, codename = rv
dev_version = bump_version(version) + '-dev'
info('Releasing %s (codename %s, release date %s)',
version, codename, release_date.strftime('%d/%m/%Y'))
tags = get_git_tags()
if version in tags:
fail('Version "%s" is already tagged', version)
if release_date.date() != date.today():
fail('Release date is not today (%s != %s)' % (release_date.date(), date.today()))
if not git_is_clean():
fail('You have uncommitted changes in git')
set_init_version(version)
set_setup_version(version)
make_git_commit('Bump version number to %s', version)
make_git_tag(version)
build_and_upload()
set_init_version(dev_version)
set_setup_version(dev_version)
if __name__ == '__main__':
main()
| Rafiot/logbook | scripts/make-release.py | Python | bsd-3-clause | 4,069 |
from __future__ import absolute_import
import logging
import six
from datetime import timedelta
from django.utils import timezone
from sentry.models import Option
from sentry.options import default_manager
from sentry.options.manager import UnknownOption
from sentry.tasks.base import instrumented_task
ONE_HOUR = 60 * 60
logger = logging.getLogger("sentry")
@instrumented_task(name="sentry.tasks.options.sync_options", queue="options")
def sync_options(cutoff=ONE_HOUR):
"""
Ensures all options that have been updated (within the database) since
``cutoff`` have their correct values stored in the cache.
This **does not** guarantee that the correct value is written into the cache,
though it will correct itself in the next update window.
"""
cutoff_dt = timezone.now() - timedelta(seconds=cutoff)
# TODO(dcramer): this doesn't handle deleted options (which shouldn't be allowed)
for option in Option.objects.filter(last_updated__gte=cutoff_dt).iterator():
try:
opt = default_manager.lookup_key(option.key)
default_manager.store.set_cache(opt, option.value)
except UnknownOption as e:
logger.exception(six.text_type(e))
| mvaled/sentry | src/sentry/tasks/options.py | Python | bsd-3-clause | 1,214 |
# -*- coding: utf-8 -*-
"Main module defining filesystem and file classes"
from __future__ import absolute_import
import ctypes
import logging
import os
import posixpath
import re
import warnings
import operator
import functools
from collections import deque
from .compatibility import FileNotFoundError, ConnectionError, PY3
from .conf import conf
from .utils import (read_block, seek_delimiter, ensure_bytes, ensure_string,
ensure_trailing_slash, MyNone)
logger = logging.getLogger(__name__)
_lib = None
DEFAULT_READ_BUFFER_SIZE = 2 ** 16
DEFAULT_WRITE_BUFFER_SIZE = 2 ** 26
def _nbytes(buf):
buf = memoryview(buf)
if PY3:
return buf.nbytes
return buf.itemsize * functools.reduce(operator.mul, buf.shape)
class HDFileSystem(object):
""" Connection to an HDFS namenode
>>> hdfs = HDFileSystem(host='127.0.0.1', port=8020) # doctest: +SKIP
"""
_first_pid = None
def __init__(self, host=MyNone, port=MyNone, connect=True, autoconf=True,
pars=None, **kwargs):
"""
Parameters
----------
host: str; port: int
Overrides which take precedence over information in conf files and
other passed parameters
connect: bool (True)
Whether to automatically attempt to establish a connection to the
name-node.
autoconf: bool (True)
Whether to use the configuration found in the conf module as
the set of defaults
pars : {str: str}
any parameters for hadoop, that you can find in hdfs-site.xml,
https://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml
This dict looks exactly like the one produced by conf - you can,
for example, remove any problematic entries.
kwargs: key/value
Further override parameters.
These are applied after the default conf and pars; the most typical
things to set are:
host : str (localhost)
namenode hostname or IP address, in case of HA mode it is name
of the cluster that can be found in "fs.defaultFS" option.
port : int (8020)
            namenode RPC port, usually 8020; in HA mode the port must be None
user, ticket_cache, token, effective_user : str
kerberos things
"""
self.conf = conf.copy() if autoconf else {}
if pars:
self.conf.update(pars)
self.conf.update(kwargs)
if host is not MyNone:
self.conf['host'] = host
if port is not MyNone:
self.conf['port'] = port
self._handle = None
if self.conf.get('ticket_cache') and self.conf.get('token'):
m = "It is not possible to use ticket_cache and token at same time"
raise RuntimeError(m)
if connect:
self.connect()
def __getstate__(self):
d = self.__dict__.copy()
del d['_handle']
logger.debug("Serialize with state: %s", d)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self._handle = None
self.connect()
def connect(self):
""" Connect to the name node
This happens automatically at startup
"""
get_lib()
conf = self.conf.copy()
if self._handle:
return
if HDFileSystem._first_pid is None:
HDFileSystem._first_pid = os.getpid()
elif HDFileSystem._first_pid != os.getpid():
warnings.warn("Attempting to re-use hdfs3 in child process %d, "
"but it was initialized in parent process %d. "
"Beware that hdfs3 is not fork-safe and this may "
"lead to bugs or crashes."
% (os.getpid(), HDFileSystem._first_pid),
RuntimeWarning, stacklevel=2)
o = _lib.hdfsNewBuilder()
_lib.hdfsBuilderSetNameNode(o, ensure_bytes(conf.pop('host')))
port = conf.pop('port', None)
if port is not None:
_lib.hdfsBuilderSetNameNodePort(o, port)
user = conf.pop('user', None)
if user is not None:
_lib.hdfsBuilderSetUserName(o, ensure_bytes(user))
effective_user = ensure_bytes(conf.pop('effective_user', None))
ticket_cache = conf.pop('ticket_cache', None)
if ticket_cache is not None:
_lib.hdfsBuilderSetKerbTicketCachePath(o, ensure_bytes(ticket_cache))
token = conf.pop('token', None)
if token is not None:
_lib.hdfsBuilderSetToken(o, ensure_bytes(token))
for par, val in conf.items():
if not _lib.hdfsBuilderConfSetStr(o, ensure_bytes(par),
ensure_bytes(val)) == 0:
warnings.warn('Setting conf parameter %s failed' % par)
fs = _lib.hdfsBuilderConnect(o, effective_user)
_lib.hdfsFreeBuilder(o)
if fs:
logger.debug("Connect to handle %d", fs.contents.filesystem)
self._handle = fs
else:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise ConnectionError('Connection Failed: {}'.format(msg))
def delegate_token(self, user=None):
"""Generate delegate auth token.
Parameters
----------
user: bytes/str
User to pass to delegation (defaults to user supplied to instance);
this user is the only one that can renew the token.
"""
        user = user or self.conf.get('user')
        if user is None:
            raise ValueError('Delegation requires a user')
out = _lib.hdfsGetDelegationToken(self._handle, ensure_bytes(user))
if out:
self.token = out
return out
else:
raise RuntimeError('Token delegation failed')
def renew_token(self, token=None):
"""
Renew delegation token
Parameters
----------
token: str or None
If None, uses the instance's token. It is an error to do that if
there is no token.
Returns
-------
New expiration time for the token
"""
        token = token or getattr(self, 'token', None)
if token is None:
raise ValueError('There is no token to renew')
return _lib.hdfsRenewDelegationToken(self._handle, ensure_bytes(token))
def cancel_token(self, token=None):
"""
Revoke delegation token
Parameters
----------
token: str or None
If None, uses the instance's token. It is an error to do that if
there is no token.
"""
        token = token or getattr(self, 'token', None)
if token is None:
raise ValueError('There is no token to cancel')
out = _lib.hdfsCancelDelegationToken(self._handle, ensure_bytes(token))
if out:
raise RuntimeError('Token cancel failed')
        if token == getattr(self, 'token', None):
# now our token is invalid - this FS may not work
self.token = None
def disconnect(self):
""" Disconnect from name node """
if self._handle:
logger.debug("Disconnect from handle %d",
self._handle.contents.filesystem)
_lib.hdfsDisconnect(self._handle)
self._handle = None
def open(self, path, mode='rb', replication=0, buff=0, block_size=0):
""" Open a file for reading or writing
Parameters
----------
path: string
Path of file on HDFS
mode: string
One of 'rb', 'wb', or 'ab'
replication: int
Replication factor; if zero, use system default (only on write)
        buff: int (=0)
Client buffer size (bytes); if 0, use default.
block_size: int
Size of data-node blocks if writing
"""
if not self._handle:
raise IOError("Filesystem not connected")
if block_size and mode != 'wb':
raise ValueError('Block size only valid when writing new file')
if ('a' in mode and self.exists(path) and
replication != 0 and replication > 1):
raise IOError("Appending to an existing file with replication > 1"
" is unsupported")
if 'b' not in mode:
raise NotImplementedError("Text mode not supported, use mode='%s'"
" and manage bytes" % (mode + 'b'))
return HDFile(self, path, mode, replication=replication, buff=buff,
block_size=block_size)
def du(self, path, total=False, deep=False):
"""Returns file sizes on a path.
Parameters
----------
path : string
where to look
total : bool (False)
to add up the sizes to a grand total
deep : bool (False)
whether to recurse into subdirectories
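        Examples
        --------
        Illustrative, assuming '/data' exists
        >>> hdfs.du('/data', total=True)  # doctest: +SKIP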
"""
fi = self.ls(path, True)
if deep:
for apath in fi:
if apath['kind'] == 'directory':
fi.extend(self.ls(apath['name'], True))
if total:
return {path: sum(f['size'] for f in fi)}
return {p['name']: p['size'] for p in fi}
def df(self):
""" Used/free disc space on the HDFS system """
cap = _lib.hdfsGetCapacity(self._handle)
used = _lib.hdfsGetUsed(self._handle)
return {'capacity': cap,
'used': used,
'percent-free': 100 * (cap - used) / cap}
def get_block_locations(self, path, start=0, length=0):
""" Fetch physical locations of blocks """
if not self._handle:
raise IOError("Filesystem not connected")
start = int(start) or 0
length = int(length) or self.info(path)['size']
nblocks = ctypes.c_int(0)
out = _lib.hdfsGetFileBlockLocations(self._handle,
ensure_bytes(path),
ctypes.c_int64(start),
ctypes.c_int64(length),
ctypes.byref(nblocks))
locs = []
for i in range(nblocks.value):
block = out[i]
hosts = [block.hosts[i] for i in
range(block.numOfNodes)]
locs.append({'hosts': hosts, 'length': block.length,
'offset': block.offset})
_lib.hdfsFreeFileBlockLocations(out, nblocks)
return locs
def info(self, path):
""" File information (as a dict) """
if not self.exists(path):
raise FileNotFoundError(path)
fi = _lib.hdfsGetPathInfo(self._handle, ensure_bytes(path)).contents
out = fi.to_dict()
_lib.hdfsFreeFileInfo(ctypes.byref(fi), 1)
return out
def isdir(self, path):
"""Return True if path refers to an existing directory."""
try:
info = self.info(path)
return info['kind'] == 'directory'
except EnvironmentError:
return False
def isfile(self, path):
"""Return True if path refers to an existing file."""
try:
info = self.info(path)
return info['kind'] == 'file'
except EnvironmentError:
return False
def walk(self, path):
"""Directory tree generator, see ``os.walk``"""
full_dirs = []
dirs = []
files = []
for info in self.ls(path, True):
name = info['name']
tail = posixpath.split(name)[1]
if info['kind'] == 'directory':
full_dirs.append(name)
dirs.append(tail)
else:
files.append(tail)
yield path, dirs, files
for d in full_dirs:
for res in self.walk(d):
yield res
def glob(self, path):
""" Get list of paths mathing glob-like pattern (i.e., with "*"s).
If passed a directory, gets all contained files; if passed path
to a file, without any "*", returns one-element list containing that
filename. Does not support python3.5's "**" notation.
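        Examples (illustrative paths)
        >>> hdfs.glob('/data/*.csv')  # doctest: +SKIP
        >>> hdfs.glob('/data/file.csv')  # doctest: +SKIP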
"""
path = ensure_string(path)
try:
f = self.info(path)
if f['kind'] == 'directory' and '*' not in path:
path = ensure_trailing_slash(path) + '*'
else:
return [f['name']]
except IOError:
pass
if '/' in path[:path.index('*')]:
ind = path[:path.index('*')].rindex('/')
root = path[:ind + 1]
else:
root = '/'
allpaths = []
for dirname, dirs, fils in self.walk(root):
allpaths.extend(posixpath.join(dirname, d) for d in dirs)
allpaths.extend(posixpath.join(dirname, f) for f in fils)
pattern = re.compile("^" + path.replace('//', '/')
.rstrip('/')
.replace('*', '[^/]*')
.replace('?', '.') + "$")
return [p for p in allpaths
if pattern.match(p.replace('//', '/').rstrip('/'))]
def ls(self, path, detail=False):
""" List files at path
Parameters
----------
path : string/bytes
location at which to list files
        detail : bool (=False)
if True, each list item is a dict of file properties;
otherwise, returns list of filenames
"""
if not self.exists(path):
raise FileNotFoundError(path)
num = ctypes.c_int(0)
fi = _lib.hdfsListDirectory(self._handle, ensure_bytes(path),
ctypes.byref(num))
out = [fi[i].to_dict() for i in range(num.value)]
_lib.hdfsFreeFileInfo(fi, num.value)
if detail:
return out
else:
return [o['name'] for o in out]
@property
def host(self):
return self.conf.get('host', '')
@property
def port(self):
return self.conf.get('port', '')
def __repr__(self):
if self._handle is None:
state = 'Disconnected'
else:
state = 'Connected'
return 'hdfs://%s:%s, %s' % (self.host, self.port, state)
def __del__(self):
if self._handle:
self.disconnect()
def mkdir(self, path):
""" Make directory at path """
out = _lib.hdfsCreateDirectory(self._handle, ensure_bytes(path))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Create directory failed: {}'.format(msg))
def makedirs(self, path, mode=0o711):
""" Create directory together with any necessary intermediates """
out = _lib.hdfsCreateDirectoryEx(self._handle, ensure_bytes(path),
ctypes.c_short(mode), 1)
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Create directory failed: {}'.format(msg))
def set_replication(self, path, replication):
""" Instruct HDFS to set the replication for the given file.
If successful, the head-node's table is updated immediately, but
actual copying will be queued for later. It is acceptable to set
a replication that cannot be supported (e.g., higher than the
number of data-nodes).
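        Example (illustrative path)
        >>> hdfs.set_replication('/path/to/file', 3)  # doctest: +SKIP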
"""
if replication < 0:
raise ValueError('Replication must be positive,'
' or 0 for system default')
out = _lib.hdfsSetReplication(self._handle, ensure_bytes(path),
ctypes.c_int16(int(replication)))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Set replication failed: {}'.format(msg))
def mv(self, path1, path2):
""" Move file at path1 to path2 """
if not self.exists(path1):
raise FileNotFoundError(path1)
out = _lib.hdfsRename(self._handle, ensure_bytes(path1),
ensure_bytes(path2))
return out == 0
def concat(self, destination, paths):
"""Concatenate inputs to destination
Source files *should* all have the same block size and replication.
The destination file must be in the same directory as
the source files. If the target exists, it will be appended to.
Some HDFSs impose that the target file must exist and be an exact
number of blocks long, and that each concated file except the last
is also a whole number of blocks.
The source files are deleted on successful
completion.
"""
if not self.exists(destination):
self.touch(destination)
arr = (ctypes.c_char_p * (len(paths) + 1))()
arr[:-1] = [ensure_bytes(s) for s in paths]
arr[-1] = ctypes.c_char_p() # NULL pointer
out = _lib.hdfsConcat(self._handle, ensure_bytes(destination), arr)
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Concat failed on %s %s' % (destination, msg))
def rm(self, path, recursive=True):
"Use recursive for `rm -r`, i.e., delete directory and contents"
if not self.exists(path):
raise FileNotFoundError(path)
out = _lib.hdfsDelete(self._handle, ensure_bytes(path), bool(recursive))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Remove failed on %s %s' % (path, msg))
def exists(self, path):
""" Is there an entry at path? """
out = _lib.hdfsExists(self._handle, ensure_bytes(path))
return out == 0
def chmod(self, path, mode):
"""Change access control of given path
Exactly what permissions the file will get depends on HDFS
configurations.
Parameters
----------
path : string
file/directory to change
mode : integer
As with the POSIX standard, each octal digit refers to
user-group-all, in that order, with read-write-execute as the
bits of each group.
Examples
--------
Make read/writeable to all
>>> hdfs.chmod('/path/to/file', 0o777) # doctest: +SKIP
Make read/writeable only to user
>>> hdfs.chmod('/path/to/file', 0o700) # doctest: +SKIP
Make read-only to user
>>> hdfs.chmod('/path/to/file', 0o100) # doctest: +SKIP
"""
if not self.exists(path):
raise FileNotFoundError(path)
out = _lib.hdfsChmod(self._handle, ensure_bytes(path),
ctypes.c_short(mode))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("chmod failed on %s %s" % (path, msg))
def chown(self, path, owner, group):
""" Change owner/group """
if not self.exists(path):
raise FileNotFoundError(path)
out = _lib.hdfsChown(self._handle, ensure_bytes(path),
ensure_bytes(owner), ensure_bytes(group))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("chown failed on %s %s" % (path, msg))
def cat(self, path):
""" Return contents of file """
if not self.exists(path):
raise FileNotFoundError(path)
with self.open(path, 'rb') as f:
result = f.read()
return result
def get(self, hdfs_path, local_path, blocksize=DEFAULT_READ_BUFFER_SIZE):
""" Copy HDFS file to local """
# TODO: _lib.hdfsCopy() may do this more efficiently
if not self.exists(hdfs_path):
raise FileNotFoundError(hdfs_path)
with self.open(hdfs_path, 'rb') as f:
with open(local_path, 'wb') as f2:
out = 1
while out:
out = f.read(blocksize)
f2.write(out)
def getmerge(self, path, filename, blocksize=DEFAULT_READ_BUFFER_SIZE):
""" Concat all files in path (a directory) to local output file """
files = self.ls(path)
with open(filename, 'wb') as f2:
for apath in files:
with self.open(apath, 'rb') as f:
out = 1
while out:
out = f.read(blocksize)
f2.write(out)
def put(self, filename, path, chunk=DEFAULT_WRITE_BUFFER_SIZE, replication=0, block_size=0):
""" Copy local file to path in HDFS """
with self.open(path, 'wb', replication=replication,
block_size=block_size) as target:
with open(filename, 'rb') as source:
while True:
out = source.read(chunk)
if len(out) == 0:
break
target.write(out)
def tail(self, path, size=1024):
""" Return last bytes of file """
length = self.du(path)[ensure_trailing_slash(path)]
if size > length:
return self.cat(path)
with self.open(path, 'rb') as f:
f.seek(length - size)
return f.read(size)
def head(self, path, size=1024):
""" Return first bytes of file """
with self.open(path, 'rb') as f:
return f.read(size)
def touch(self, path):
""" Create zero-length file """
self.open(path, 'wb').close()
def read_block(self, fn, offset, length, delimiter=None):
""" Read a block of bytes from an HDFS file
Starting at ``offset`` of the file, read ``length`` bytes. If
``delimiter`` is set then we ensure that the read starts and stops at
delimiter boundaries that follow the locations ``offset`` and ``offset
+ length``. If ``offset`` is zero then we start at zero. The
bytestring returned will not include the surrounding delimiter strings.
If offset+length is beyond the eof, reads to eof.
Parameters
----------
fn: string
Path to filename on HDFS
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
Examples
--------
>>> hdfs.read_block('/data/file.csv', 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> hdfs.read_block('/data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200'
See Also
--------
hdfs3.utils.read_block
"""
with self.open(fn, 'rb') as f:
size = f.info()['size']
if offset + length > size:
length = size - offset
bytes = read_block(f, offset, length, delimiter)
return bytes
def list_encryption_zones(self):
"""Get list of all the encryption zones"""
x = ctypes.c_int(8)
out = _lib.hdfsListEncryptionZones(self._handle, x)
if not out:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("EZ listing failed: %s" % msg)
res = [out[i].to_dict() for i in range(x.value)]
if res:
_lib.hdfsFreeEncryptionZoneInfo(out, x)
return res
def create_encryption_zone(self, path, key_name):
out = _lib.hdfsCreateEncryptionZone(self._handle, ensure_bytes(path),
ensure_bytes(key_name))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("EZ create failed: %s %s" % (path, msg))
def get_lib():
""" Import C-lib only on demand """
global _lib
if _lib is None:
from .lib import _lib as l
_lib = l
mode_numbers = {'w': 1, 'r': 0, 'a': 1025,
'wb': 1, 'rb': 0, 'ab': 1025}
class HDFile(object):
""" File on HDFS
Matches the standard Python file interface.
Examples
--------
>>> with hdfs.open('/path/to/hdfs/file.txt') as f: # doctest: +SKIP
... bytes = f.read(1000) # doctest: +SKIP
>>> with hdfs.open('/path/to/hdfs/file.csv') as f: # doctest: +SKIP
... df = pd.read_csv(f, nrows=1000) # doctest: +SKIP
"""
def __init__(self, fs, path, mode, replication=0, buff=0, block_size=0):
""" Called by open on a HDFileSystem """
if 't' in mode:
raise NotImplementedError("Opening a file in text mode is not"
" supported, use ``io.TextIOWrapper``.")
self.fs = fs
self.path = path
self.replication = replication
self.buff = buff
self._fs = fs._handle
self.buffers = []
self._handle = None
self.mode = mode
self.block_size = block_size
self.lines = deque([])
self._set_handle()
self.size = self.info()['size']
def _set_handle(self):
out = _lib.hdfsOpenFile(self._fs, ensure_bytes(self.path),
mode_numbers[self.mode], self.buff,
ctypes.c_short(self.replication),
ctypes.c_int64(self.block_size))
if not out:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("Could not open file: %s, mode: %s %s" %
(self.path, self.mode, msg))
self._handle = out
def readinto(self, length, out):
"""
Read up to ``length`` bytes from the file into the ``out`` buffer,
which can be of any type that implements the buffer protocol (example: ``bytearray``,
``memoryview`` (py3 only), numpy array, ...).
Parameters
----------
length : int
maximum number of bytes to read
out : buffer
where to write the output data
Returns
-------
int
number of bytes read
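        Examples
        --------
        A sketch, assuming ``f`` is an HDFile open for reading
        >>> buf = bytearray(2 ** 16)  # doctest: +SKIP
        >>> n = f.readinto(len(buf), buf)  # doctest: +SKIP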
"""
if not _lib.hdfsFileIsOpenForRead(self._handle):
raise IOError('File not in read mode')
bufsize = length
bufpos = 0
# convert from buffer protocol to ctypes-compatible type
buflen = _nbytes(out)
buf_for_ctypes = (ctypes.c_byte * buflen).from_buffer(out)
while length:
bufp = ctypes.byref(buf_for_ctypes, bufpos)
ret = _lib.hdfsRead(
self._fs, self._handle, bufp, ctypes.c_int32(bufsize - bufpos))
if ret == 0: # EOF
break
if ret > 0:
length -= ret
bufpos += ret
else:
raise IOError('Read file %s Failed:' % self.path, -ret)
return bufpos
def read(self, length=None, out_buffer=None):
"""
Read up to ``length`` bytes from the file. Reads shorter than ``length``
only occur at the end of the file, if less data is available.
If ``out_buffer`` is given, read directly into ``out_buffer``. It can be
anything that implements the buffer protocol, for example ``bytearray``,
``memoryview`` (py3 only), numpy arrays, ...
Parameters
----------
length : int
number of bytes to read. if it is None, read all remaining bytes
from the current position.
out_buffer : buffer, None or True
the buffer to use as output, None to return bytes, True to create
and return new buffer
Returns
-------
bytes
the data read (only if out_buffer is None)
memoryview
the data read as a memoryview into the buffer
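        Examples
        --------
        A sketch, assuming ``f`` is an HDFile open for reading
        >>> data = f.read(100)  # doctest: +SKIP
        >>> view = f.read(100, out_buffer=True)  # doctest: +SKIP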
"""
return_buffer = out_buffer is not None
max_read = self.size - self.tell()
read_length = max_read if length in [None, -1] else length
read_length = min(max_read, read_length)
if out_buffer is None or out_buffer is True:
out_buffer = bytearray(read_length)
else:
if _nbytes(out_buffer) < read_length:
raise IOError('buffer too small (%d < %d)' % (_nbytes(out_buffer), read_length))
bytes_read = self.readinto(length=read_length, out=out_buffer)
if bytes_read < _nbytes(out_buffer):
out_buffer = memoryview(out_buffer)[:bytes_read]
if return_buffer:
return memoryview(out_buffer)
return memoryview(out_buffer).tobytes()
def readline(self, chunksize=0, lineterminator='\n'):
""" Return a line using buffered reading.
A line is a sequence of bytes between ``'\n'`` markers (or given
line-terminator).
Line iteration uses this method internally.
Note: this function requires many calls to HDFS and is slow; it is
in general better to wrap an HDFile with an ``io.TextIOWrapper`` for
buffering, text decoding and newline support.
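        Example sketch of the recommended wrapper (py3)
        >>> import io  # doctest: +SKIP
        >>> f = hdfs.open('/path/to/file.txt', 'rb')  # doctest: +SKIP
        >>> text = io.TextIOWrapper(f)  # doctest: +SKIP
        >>> line = text.readline()  # doctest: +SKIP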
"""
if chunksize == 0:
chunksize = self.buff if self.buff != 0 else DEFAULT_READ_BUFFER_SIZE
lineterminator = ensure_bytes(lineterminator)
start = self.tell()
seek_delimiter(self, lineterminator, chunksize, allow_zero=False)
end = self.tell()
self.seek(start)
return self.read(end - start)
def _genline(self):
while True:
out = self.readline()
if out:
yield out
else:
                return  # PEP 479: a generator must return, not raise StopIteration
def __iter__(self):
""" Enables `for line in file:` usage """
return self._genline()
def __next__(self):
""" Enables reading a file as a buffer in pandas """
out = self.readline()
if out:
return out
else:
raise StopIteration
# PY2 compatibility
next = __next__
def readlines(self):
""" Return all lines in a file as a list """
return list(self)
def tell(self):
""" Get current byte location in a file """
out = _lib.hdfsTell(self._fs, self._handle)
if out == -1:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Tell Failed on file %s %s' % (self.path, msg))
return out
def seek(self, offset, from_what=0):
""" Set file read position. Read mode only.
Attempt to move out of file bounds raises an exception. Note that,
by the convention in python file seek, offset should be <=0 if
from_what is 2.
Parameters
----------
offset : int
byte location in the file.
from_what : int 0, 1, 2
            if 0 (default), relative to file start; if 1, relative to current
location; if 2, relative to file end.
Returns
-------
new position
"""
if from_what not in {0, 1, 2}:
raise ValueError('seek mode must be 0, 1 or 2')
info = self.info()
if from_what == 1:
offset = offset + self.tell()
elif from_what == 2:
offset = info['size'] + offset
if offset < 0 or offset > info['size']:
raise ValueError('Attempt to seek outside file')
out = _lib.hdfsSeek(self._fs, self._handle, ctypes.c_int64(offset))
if out == -1: # pragma: no cover
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
            raise IOError('Seek Failed on file %s %s' % (self.path, msg))
return self.tell()
def info(self):
""" Filesystem metadata about this file """
return self.fs.info(self.path)
def write(self, data):
""" Write bytes to open file (which must be in w or a mode) """
data = ensure_bytes(data)
if not data:
return
if not _lib.hdfsFileIsOpenForWrite(self._handle):
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('File not write mode: {}'.format(msg))
write_block = self.buff if self.buff != 0 else DEFAULT_WRITE_BUFFER_SIZE
for offset in range(0, len(data), write_block):
d = ensure_bytes(data[offset:offset + write_block])
if not _lib.hdfsWrite(self._fs, self._handle, d, len(d)) == len(d):
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Write failed on file %s, %s' % (self.path, msg))
return len(data)
def flush(self):
""" Send buffer to the data-node; actual write may happen later """
_lib.hdfsFlush(self._fs, self._handle)
def close(self):
""" Flush and close file, ensuring the data is readable """
self.flush()
_lib.hdfsCloseFile(self._fs, self._handle)
self._handle = None # _libhdfs releases memory
self.mode = 'closed'
@property
def read1(self):
return self.read
@property
def closed(self):
return self.mode == 'closed'
def writable(self):
return self.mode.startswith('w') or self.mode.startswith('a')
def seekable(self):
return self.readable()
def readable(self):
return self.mode.startswith('r')
def __del__(self):
self.close()
def __repr__(self):
return 'hdfs://%s:%s%s, %s' % (self.fs.host, self.fs.port,
self.path, self.mode)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
| blaze/hdfs3 | hdfs3/core.py | Python | bsd-3-clause | 34,058 |
from framework.db import models
from framework.config import config
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import ResourceInterface
from framework.lib.general import cprint
import os
import logging
from framework.utils import FileOperations
class ResourceDB(BaseComponent, ResourceInterface):
COMPONENT_NAME = "resource"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.db_config = self.get_component("db_config")
self.target = self.get_component("target")
self.db = self.get_component("db")
self.LoadResourceDBFromFile(self.config.get_profile_path("RESOURCES_PROFILE"))
def LoadResourceDBFromFile(self, file_path): # This needs to be a list instead of a dictionary to preserve order in python < 2.7
logging.info("Loading Resources from: " + file_path + " ..")
resources = self.GetResourcesFromFile(file_path)
# Delete all old resources which are not edited by user
# because we may have updated the resource
self.db.session.query(models.Resource).filter_by(dirty=False).delete()
# resources = [(Type, Name, Resource), (Type, Name, Resource),]
for Type, Name, Resource in resources:
self.db.session.add(models.Resource(resource_type=Type, resource_name=Name, resource=Resource))
self.db.session.commit()
def GetResourcesFromFile(self, resource_file):
resources = set()
ConfigFile = FileOperations.open(resource_file, 'r').read().splitlines() # To remove stupid '\n' at the end
for line in ConfigFile:
            if not line or line[0] == '#':
                continue  # Skip blank and comment lines
try:
Type, Name, Resource = line.split('_____')
# Resource = Resource.strip()
resources.add((Type, Name, Resource))
except ValueError:
cprint("ERROR: The delimiter is incorrect in this line at Resource File: "+str(line.split('_____')))
return resources
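    # Illustrative resource-file line (hypothetical values; the three fields
    # are separated by five underscores):
    #   PassiveLink_____Google_____http://www.google.com/search?q=TARGET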
def GetReplacementDict(self):
configuration = self.db_config.GetReplacementDict()
configuration.update(self.target.GetTargetConfig())
configuration.update(self.config.GetReplacementDict())
return configuration
def GetRawResources(self, ResourceType):
filter_query = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter_by(resource_type = ResourceType)
# Sorting is necessary for working of ExtractURLs, since it must run after main command, so order is imp
sort_query = filter_query.order_by(models.Resource.id)
raw_resources = sort_query.all()
return raw_resources
def GetResources(self, ResourceType):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResources(ResourceType)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
def GetRawResourceList(self, ResourceList):
raw_resources = self.db.session.query(models.Resource.resource_name, models.Resource.resource).filter(models.Resource.resource_type.in_(ResourceList)).all()
return raw_resources
def GetResourceList(self, ResourceTypeList):
replacement_dict = self.GetReplacementDict()
raw_resources = self.GetRawResourceList(ResourceTypeList)
resources = []
for name, resource in raw_resources:
resources.append([name, self.config.MultipleReplace(resource, replacement_dict)])
return resources
| sharad1126/owtf | framework/db/resource_manager.py | Python | bsd-3-clause | 3,759 |
# For these tests to run successfully, two conditions must be met:
# 1. MEDIA_URL and MEDIA_ROOT must be set in settings
# 2. The user running the tests must have read/write access to MEDIA_ROOT
# Unit tests:
from sorl.thumbnail.tests.classes import ThumbnailTest, DjangoThumbnailTest
from sorl.thumbnail.tests.templatetags import ThumbnailTagTest
from sorl.thumbnail.tests.fields import FieldTest, \
ImageWithThumbnailsFieldTest, ThumbnailFieldTest
# Doc tests:
from sorl.thumbnail.tests.utils import utils_tests
from sorl.thumbnail.tests.templatetags import filesize_tests
__test__ = {
'utils_tests': utils_tests,
'filesize_tests': filesize_tests,
}
| cuker/sorl-thumbnail-legacy | sorl/thumbnail/tests/__init__.py | Python | bsd-3-clause | 665 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: Tanja Baumann
Email: tanja@auterion.com
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g. battery_status/current_a
you will have to comment out the corresponding parts in the script
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
import sys
#arguments
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
parser.add_argument('logfile', type=str, nargs='?', default=[],
help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance
#Load the log data (produced by pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
def get_data(topic_name, variable_name, index):
try:
dataset = log.get_dataset(topic_name, index)
return dataset.data[variable_name]
    except Exception:
        # topic or field not present in this log
        return []
def ms2s_list(time_ms_list):
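    # Note: despite the "ms" in the name, ULog timestamps are microseconds,
    # so multiplying by 1e-6 converts them to seconds.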
if len(time_ms_list) > 0:
return 1e-6 * time_ms_list
else:
return time_ms_list
# Select msgs and copy into arrays
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))
if comp_type == "thrust":
power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
comp_type_param = 1
factor = 1
unit = "[G]"
elif comp_type == "current":
power = get_data('battery_status', 'current_a', comp_instance)
power = np.true_divide(power, 1000) #kA
power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
comp_type_param = 2 + comp_instance
factor = -1
unit = "[G/kA]"
else:
print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
sys.exit(1)
if len(power) == 0:
print("could not retrieve power signal from log, zero data points")
sys.exit(1)
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)
mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)
mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)
mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []
if len(mag0X_body) > 0:
magX_body.append(mag0X_body)
magY_body.append(mag0Y_body)
magZ_body.append(mag0Z_body)
t_mag.append(t_mag0)
mag_id.append(mag0_ID[0])
if len(mag1X_body) > 0:
magX_body.append(mag1X_body)
magY_body.append(mag1Y_body)
magZ_body.append(mag1Z_body)
t_mag.append(t_mag1)
mag_id.append(mag1_ID[0])
if len(mag2X_body) > 0:
magX_body.append(mag2X_body)
magY_body.append(mag2Y_body)
magZ_body.append(mag2Z_body)
t_mag.append(t_mag2)
mag_id.append(mag2_ID[0])
if len(mag3X_body) > 0:
magX_body.append(mag3X_body)
magY_body.append(mag3Y_body)
magZ_body.append(mag3Z_body)
t_mag.append(t_mag3)
mag_id.append(mag3_ID[0])
n_mag = len(magX_body)
#log index does not necessarily match mag calibration instance number
calibration_instance = []
instance_found = False
for idx in range(n_mag):
instance_found = False
for j in range(4):
if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
calibration_instance.append(j)
instance_found = True
if not instance_found:
print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))
#get first arming sequence from data
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
if armed[i] == 1 and armed[i+1] == 2:
start_time = t_armed[i+1]
if armed[i] == 2 and armed[i+1] == 1:
stop_time = t_armed[i+1]
break
#cut unarmed sequences from mag data
index_start = 0
index_stop = 0
for idx in range(n_mag):
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > start_time:
index_start = i
break
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > stop_time:
            index_stop = i - 1
break
t_mag[idx] = t_mag[idx][index_start:index_stop]
magX_body[idx] = magX_body[idx][index_start:index_stop]
magY_body[idx] = magY_body[idx][index_start:index_stop]
magZ_body[idx] = magZ_body[idx][index_start:index_stop]
#resample data
power_resampled = []
for idx in range(n_mag):
power_resampled.append(interp(t_mag[idx], power_t, power))
#fit linear to get coefficients
px = []
py = []
pz = []
for idx in range(n_mag):
    px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1, full=True)
    py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1, full=True)
    pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full=True)
px.append(px_temp)
py.append(py_temp)
pz.append(pz_temp)
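# Illustrative check of the fitted model (hypothetical numbers): polyfit
# returns [slope, offset] with mag ~= slope * power + offset, so a slope of
# 0.3 G/kA at 0.1 kA predicts a 0.03 G disturbance; the compensated reading
# used below is therefore mag_measured - slope * power.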
#print to console
for idx in range(n_mag):
print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))
#plot data
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(1,3,1)
plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag X [G]')
plt.subplot(1,3,2)
plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Y [G]')
plt.subplot(1,3,3)
plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Z [G]')
# display results
plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * px[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * py[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * pz[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
#compensation comparison plots
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(3,1,1)
original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
power_x, = plt.plot(t_mag[idx],magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_x, power_x])
plt.xlabel('Time [s]')
plt.ylabel('Mag X corrected[G]')
plt.subplot(3,1,2)
original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
power_y, = plt.plot(t_mag[idx],magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_y, power_y])
plt.xlabel('Time [s]')
plt.ylabel('Mag Y corrected[G]')
plt.subplot(3,1,3)
original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
power_z, = plt.plot(t_mag[idx],magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_z, power_z])
plt.xlabel('Time [s]')
plt.ylabel('Mag Z corrected[G]')
plt.show()
| PX4/Firmware | src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py | Python | bsd-3-clause | 10,043 |
import os
from socket import gethostname
import time
import urllib
from uuid import getnode as getmac
import webbrowser
import httplib2 # included with oauth2client
from oauth2client.client import OAuth2WebServerFlow, TokenRevokeError
import oauth2client.file
import gmusicapi
from gmusicapi.clients.shared import _Base
from gmusicapi.compat import my_appdirs
from gmusicapi.exceptions import CallFailure, NotLoggedIn
from gmusicapi.protocol import musicmanager, upload_pb2, locker_pb2
from gmusicapi.utils import utils
from gmusicapi import session
OAUTH_FILEPATH = os.path.join(my_appdirs.user_data_dir, 'oauth.cred')
class Musicmanager(_Base):
"""Allows uploading by posing as Google's Music Manager.
Musicmanager uses OAuth, so a plaintext email and password are not required
when logging in.
For most authors and users of gmusicapi scripts,
:func:`perform_oauth` should be run once per machine to
store credentials to disk.
    Future calls to :func:`login` can
    use the stored credentials by default.
Some authors may want more control over the OAuth flow.
In this case, credentials can be directly provided to :func:`login`.
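    A typical first run might look like this (illustrative; the path is a
    placeholder)::

        Musicmanager.perform_oauth()  # interactive, once per machine
        mm = Musicmanager()
        mm.login()
        mm.upload(['/path/to/song.mp3'])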
"""
_session_class = session.Musicmanager
@staticmethod
def perform_oauth(storage_filepath=OAUTH_FILEPATH, open_browser=False):
"""Provides a series of prompts for a user to follow to authenticate.
Returns ``oauth2client.client.OAuth2Credentials`` when successful.
In most cases, this should only be run once per machine to store
credentials to disk, then never be needed again.
If the user refuses to give access,
``oauth2client.client.FlowExchangeError`` is raised.
:param storage_filepath: a filepath to write the credentials to,
or ``None``
to not write the credentials to disk (which is not recommended).
`Appdirs <https://pypi.python.org/pypi/appdirs>`__
``user_data_dir`` is used by default. Users can run::
import gmusicapi.clients
print gmusicapi.clients.OAUTH_FILEPATH
to see the exact location on their system.
:param open_browser: if True, attempt to open the auth url
in the system default web browser. The url will be printed
regardless of this param's setting.
This flow is intentionally very simple.
For complete control over the OAuth flow, pass an
``oauth2client.client.OAuth2Credentials``
to :func:`login` instead.
"""
flow = OAuth2WebServerFlow(*musicmanager.oauth)
auth_uri = flow.step1_get_authorize_url()
print
print "Visit the following url:\n %s" % auth_uri
if open_browser:
print
print 'Opening your browser to it now...',
webbrowser.open(auth_uri)
print 'done.'
print "If you don't see your browser, you can just copy and paste the url."
print
code = raw_input("Follow the prompts,"
" then paste the auth code here and hit enter: ")
credentials = flow.step2_exchange(code)
if storage_filepath is not None:
if storage_filepath == OAUTH_FILEPATH:
utils.make_sure_path_exists(os.path.dirname(OAUTH_FILEPATH), 0o700)
storage = oauth2client.file.Storage(storage_filepath)
storage.put(credentials)
return credentials
def __init__(self, debug_logging=True, validate=True, verify_ssl=True):
super(Musicmanager, self).__init__(self.__class__.__name__,
debug_logging,
validate,
verify_ssl)
def login(self, oauth_credentials=OAUTH_FILEPATH,
uploader_id=None, uploader_name=None):
"""Authenticates the Music Manager using OAuth.
Returns ``True`` on success, ``False`` on failure.
Unlike the :class:`Webclient`, OAuth allows authentication without
providing plaintext credentials to the application.
In most cases, the default parameters should be acceptable. Users on
virtual machines will want to provide `uploader_id`.
:param oauth_credentials: ``oauth2client.client.OAuth2Credentials`` or the path to a
``oauth2client.file.Storage`` file. By default, the same default path used by
:func:`perform_oauth` is used.
Endusers will likely call :func:`perform_oauth` once to write
credentials to disk and then ignore this parameter.
This param
is mostly intended to allow flexibility for developers of a
3rd party service who intend to perform their own OAuth flow
(eg on their website).
:param uploader_id: a unique id as a MAC address, eg ``'00:11:22:33:AA:BB'``.
This should only be provided in cases where the default
(host MAC address incremented by 1) will not work.
Upload behavior is undefined if a Music Manager uses the same id, especially when
reporting bad matches.
``ValueError`` will be raised if this is provided but not in the proper form.
``OSError`` will be raised if this is not provided and a real MAC could not be
determined (most common when running on a VPS).
If provided, use the same id on all future runs for this machine,
because of the upload device limit explained below.
:param uploader_name: human-readable non-unique id; default is
``"<hostname> (gmusicapi-{version})"``.
This doesn't appear to be a part of authentication at all.
Registering with (id, name = X, Y) and logging in with
(id, name = X, Z) works, and does not change the server-stored
uploader_name.
There are hard limits on how many upload devices can be registered; refer to `Google's
docs <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1230356>`__. There
have been limits on deauthorizing devices in the past, so it's smart not to register
more devices than necessary.
"""
return (self._oauth_login(oauth_credentials) and
self._perform_upauth(uploader_id, uploader_name))
def _oauth_login(self, oauth_credentials):
"""Auth ourselves to the MM oauth endpoint.
Return True on success; see :py:func:`login` for params.
"""
if isinstance(oauth_credentials, basestring):
oauth_file = oauth_credentials
if oauth_file == OAUTH_FILEPATH:
utils.make_sure_path_exists(os.path.dirname(OAUTH_FILEPATH), 0o700)
storage = oauth2client.file.Storage(oauth_file)
oauth_credentials = storage.get()
if oauth_credentials is None:
self.logger.warning("could not retrieve oauth credentials from '%s'", oauth_file)
return False
if not self.session.login(oauth_credentials):
self.logger.warning("failed to authenticate")
return False
self.logger.info("oauth successful")
return True
def _perform_upauth(self, uploader_id, uploader_name):
"""Auth or register ourselves as an upload client.
Return True on success; see :py:func:`login` for params.
"""
if uploader_id is None:
mac_int = getmac()
if (mac_int >> 40) % 2:
raise OSError('a valid MAC could not be determined.'
' Provide uploader_id (and be'
' sure to provide the same one on future runs).')
else:
# distinguish us from a Music Manager on this machine
mac_int = (mac_int + 1) % (1 << 48)
uploader_id = utils.create_mac_string(mac_int)
if not utils.is_valid_mac(uploader_id):
raise ValueError('uploader_id is not in a valid form.'
'\nProvide 6 pairs of hex digits'
' with capital letters',
' (eg "00:11:22:33:AA:BB")')
if uploader_name is None:
uploader_name = gethostname() + u" (gmusicapi-%s)" % gmusicapi.__version__
try:
# this is a MM-specific step that might register a new device.
self._make_call(musicmanager.AuthenticateUploader,
uploader_id,
uploader_name)
self.logger.info("successful upauth")
self.uploader_id = uploader_id
self.uploader_name = uploader_name
except CallFailure:
self.logger.exception("upauth failure")
self.session.logout()
return False
return True
def logout(self, revoke_oauth=False):
"""Forgets local authentication in this Client instance.
:param revoke_oauth: if True, oauth credentials will be permanently
revoked. If credentials came from a file, it will be deleted.
Returns ``True`` on success."""
# TODO the login/logout stuff is all over the place
success = True
if revoke_oauth:
try:
# this automatically deletes a Storage file, if present
self.session._oauth_creds.revoke(httplib2.Http())
except TokenRevokeError:
self.logger.exception("could not revoke oauth credentials")
success = False
self.uploader_id = None
self.uploader_name = None
return success and super(Musicmanager, self).logout()
# mostly copy-paste from Webclient.get_all_songs.
# not worried about overlap in this case; the logic of either could change.
def get_uploaded_songs(self, incremental=False):
"""Returns a list of dictionaries, each with the following keys:
``('id', 'title', 'album', 'album_artist', 'artist', 'track_number',
'track_size')``.
All Access tracks that were added to the library will not be included,
only tracks uploaded/matched by the user.
:param incremental: if True, return a generator that yields lists
of at most 1000 dictionaries
as they are retrieved from the server. This can be useful for
presenting a loading bar to a user.
"""
to_return = self._get_all_songs()
if not incremental:
to_return = [song for chunk in to_return for song in chunk]
return to_return
@staticmethod
def _track_info_to_dict(track_info):
"""Given a download_pb2.DownloadTrackInfo, return a dictionary."""
# figure it's better to hardcode keys here than use introspection
# and risk returning a new field all of a sudden.
return dict((field, getattr(track_info, field)) for field in
('id', 'title', 'album', 'album_artist', 'artist',
'track_number', 'track_size'))
def _get_all_songs(self):
"""Return a generator of song chunks."""
get_next_chunk = True
# need to spoof .continuation_token access, and
# can't add attrs to object(). Can with functions.
lib_chunk = lambda: 0 # noqa
lib_chunk.continuation_token = None
while get_next_chunk:
lib_chunk = self._make_call(musicmanager.ListTracks,
self.uploader_id,
lib_chunk.continuation_token)
yield [self._track_info_to_dict(info)
for info in lib_chunk.download_track_info]
get_next_chunk = lib_chunk.HasField('continuation_token')
@utils.enforce_id_param
def download_song(self, song_id):
"""Returns a tuple ``(u'suggested_filename', 'audio_bytestring')``.
The filename
will be what the Music Manager would save the file as,
presented as a unicode string with the proper file extension.
You don't have to use it if you don't want.
:param song_id: a single song id.
To write the song to disk, use something like::
filename, audio = mm.download_song(an_id)
# if open() throws a UnicodeEncodeError, either use
# filename.encode('utf-8')
# or change your default encoding to something sane =)
with open(filename, 'wb') as f:
f.write(audio)
Unlike with :py:func:`Webclient.get_song_download_info
<gmusicapi.clients.Webclient.get_song_download_info>`,
there is no download limit when using this interface.
Also unlike the Webclient, downloading a track requires authentication.
Returning a url does not suffice, since retrieving a track without auth
will produce an http 500.
"""
url = self._make_call(musicmanager.GetDownloadLink,
song_id,
self.uploader_id)['url']
response = self._make_call(musicmanager.DownloadTrack, url)
cd_header = response.headers['content-disposition']
filename = urllib.unquote(cd_header.split("filename*=UTF-8''")[-1])
filename = filename.decode('utf-8')
return (filename, response.content)
# def get_quota(self):
# """Returns a tuple of (allowed number of tracks, total tracks, available tracks)."""
# quota = self._mm_pb_call("client_state").quota
# #protocol incorrect here...
# return (quota.maximumTracks, quota.totalTracks, quota.availableTracks)
@utils.accept_singleton(basestring)
@utils.empty_arg_shortcircuit(return_code='{}')
def upload(self, filepaths, transcode_quality='320k', enable_matching=False):
"""Uploads the given filepaths.
All non-mp3 files will be transcoded before being uploaded.
This is a limitation of Google's backend.
An available installation of ffmpeg or avconv is required in most cases:
see `the installation page
<https://unofficial-google-music-api.readthedocs.org/en
/latest/usage.html?#installation>`__ for details.
Returns a 3-tuple ``(uploaded, matched, not_uploaded)`` of dictionaries, eg::
(
{'<filepath>': '<new server id>'}, # uploaded
{'<filepath>': '<new server id>'}, # matched
{'<filepath>': '<reason, eg ALREADY_EXISTS>'} # not uploaded
)
:param filepaths: a list of filepaths, or a single filepath.
:param transcode_quality: if int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame
(`lower-better int,
<http://trac.ffmpeg.org/wiki/Encoding%20VBR%20(Variable%20Bit%20Rate)%20mp3%20audio>`__).
If string, pass to ffmpeg/avconv ``-b:a`` (eg ``'128k'`` for an average bitrate of 128k).
The default is 320kbps cbr (the highest possible quality).
:param enable_matching: if ``True``, attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
to avoid uploading every song.
This requires ffmpeg or avconv.
**WARNING**: currently, mismatched songs can *not* be fixed with the 'Fix Incorrect Match'
button nor :py:func:`report_incorrect_match
<gmusicapi.clients.Webclient.report_incorrect_match>`.
They would have to be deleted and reuploaded with matching disabled
(or with the Music Manager).
Fixing matches from gmusicapi may be supported in a future release; see issue `#89
<https://github.com/simon-weber/gmusicapi/issues/89>`__.
All Google-supported filetypes are supported; see `Google's documentation
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1100462>`__.
Unlike Google's Music Manager, this function will currently allow the same song to
be uploaded more than once if its tags are changed. This is subject to change in the future.
If ``PERMANENT_ERROR`` is given as a not_uploaded reason, attempts to reupload will never
succeed. The file will need to be changed before the server will reconsider it; the easiest
way is to change metadata tags (it's not important that the tag be uploaded, just that the
contents of the file change somehow).
"""
if self.uploader_id is None or self.uploader_name is None:
raise NotLoggedIn("Not authenticated as an upload device;"
" run Api.login(...perform_upload_auth=True...)"
" first.")
# TODO there is way too much code in this function.
# To return.
uploaded = {}
matched = {}
not_uploaded = {}
# Gather local information on the files.
local_info = {} # {clientid: (path, Track)}
for path in filepaths:
try:
track = musicmanager.UploadMetadata.fill_track_info(path)
except BaseException as e:
self.logger.exception("problem gathering local info of '%r'", path)
user_err_msg = str(e)
if 'Non-ASCII strings must be converted to unicode' in str(e):
# This is a protobuf-specific error; they require either ascii or unicode.
# To keep behavior consistent, make no effort to guess - require users
# to decode first.
user_err_msg = ("nonascii bytestrings must be decoded to unicode"
" (error: '%s')" % user_err_msg)
not_uploaded[path] = user_err_msg
else:
local_info[track.client_id] = (path, track)
if not local_info:
return uploaded, matched, not_uploaded
# TODO allow metadata faking
# Upload metadata; the server tells us what to do next.
res = self._make_call(musicmanager.UploadMetadata,
[t for (path, t) in local_info.values()],
self.uploader_id)
# TODO checking for proper contents should be handled in verification
md_res = res.metadata_response
responses = [r for r in md_res.track_sample_response]
sample_requests = [req for req in md_res.signed_challenge_info]
# Send scan and match samples if requested.
for sample_request in sample_requests:
path, track = local_info[sample_request.challenge_info.client_track_id]
bogus_sample = None
if not enable_matching:
bogus_sample = '' # just send empty bytes
try:
res = self._make_call(musicmanager.ProvideSample,
path, sample_request, track,
self.uploader_id, bogus_sample)
except (IOError, ValueError) as e:
self.logger.warning("couldn't create scan and match sample for '%s': %s",
path, str(e))
not_uploaded[path] = str(e)
else:
responses.extend(res.sample_response.track_sample_response)
# Read sample responses and prep upload requests.
to_upload = {} # {serverid: (path, Track, do_not_rematch?)}
for sample_res in responses:
path, track = local_info[sample_res.client_track_id]
if sample_res.response_code == upload_pb2.TrackSampleResponse.MATCHED:
self.logger.info("matched '%s' to sid %s", path, sample_res.server_track_id)
if enable_matching:
matched[path] = sample_res.server_track_id
else:
self.logger.exception("'%s' was matched without matching enabled", path)
elif sample_res.response_code == upload_pb2.TrackSampleResponse.UPLOAD_REQUESTED:
to_upload[sample_res.server_track_id] = (path, track, False)
else:
# there was a problem
# report the symbolic name of the response code enum for debugging
enum_desc = upload_pb2._TRACKSAMPLERESPONSE.enum_types[0]
res_name = enum_desc.values_by_number[sample_res.response_code].name
err_msg = "TrackSampleResponse code %s: %s" % (sample_res.response_code, res_name)
if res_name == 'ALREADY_EXISTS':
# include the sid, too
# this shouldn't be relied on externally, but I use it in
# tests - being surrounded by parens is how it's matched
err_msg += "(%s)" % sample_res.server_track_id
self.logger.warning("upload of '%s' rejected: %s", path, err_msg)
not_uploaded[path] = err_msg
# Send upload requests.
if to_upload:
# TODO reordering requests could avoid wasting time waiting for reup sync
self._make_call(musicmanager.UpdateUploadState, 'start', self.uploader_id)
for server_id, (path, track, do_not_rematch) in to_upload.items():
            # It can take a few tries to get a session.
should_retry = True
attempts = 0
while should_retry and attempts < 10:
session = self._make_call(musicmanager.GetUploadSession,
self.uploader_id, len(uploaded),
track, path, server_id, do_not_rematch)
attempts += 1
got_session, error_details = \
musicmanager.GetUploadSession.process_session(session)
if got_session:
self.logger.info("got an upload session for '%s'", path)
break
should_retry, reason, error_code = error_details
self.logger.debug("problem getting upload session: %s\ncode=%s retrying=%s",
reason, error_code, should_retry)
if error_code == 200 and do_not_rematch:
# reupload requests need to wait on a server sync
# 200 == already uploaded, so force a retry in this case
should_retry = True
time.sleep(6) # wait before retrying
else:
err_msg = "GetUploadSession error %s: %s" % (error_code, reason)
self.logger.warning("giving up on upload session for '%s': %s", path, err_msg)
not_uploaded[path] = err_msg
continue # to next upload
# got a session, do the upload
# this terribly inconsistent naming isn't my fault: Google--
session = session['sessionStatus']
external = session['externalFieldTransfers'][0]
session_url = external['putInfo']['url']
content_type = external.get('content_type', 'audio/mpeg')
if track.original_content_type != locker_pb2.Track.MP3:
try:
self.logger.info("transcoding '%s' to mp3", path)
contents = utils.transcode_to_mp3(path, quality=transcode_quality)
except (IOError, ValueError) as e:
self.logger.warning("error transcoding %s: %s", path, e)
not_uploaded[path] = "transcoding error: %s" % e
continue
else:
with open(path, 'rb') as f:
contents = f.read()
upload_response = self._make_call(musicmanager.UploadFile,
session_url, content_type, contents)
success = upload_response.get('sessionStatus', {}).get('state')
if success:
uploaded[path] = server_id
else:
# 404 == already uploaded? serverside check on clientid?
self.logger.debug("could not finalize upload of '%s'. response: %s",
path, upload_response)
not_uploaded[path] = 'could not finalize upload; details in log'
self._make_call(musicmanager.UpdateUploadState, 'stopped', self.uploader_id)
return uploaded, matched, not_uploaded
| dvirtz/gmusicapi | gmusicapi/clients/musicmanager.py | Python | bsd-3-clause | 24,772 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from . import model, signing, test_common, test_config
mock = test_common.import_mock()
# python2 support.
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@mock.patch('signing.commands.lenient_run_command_output')
@mock.patch('signing.commands.macos_version', return_value=[10, 15])
class TestLinkerSignedArm64NeedsForce(unittest.TestCase):
def test_oserror(self, macos_version, lenient_run_command_output):
lenient_run_command_output.return_value = (None, None, None)
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_unsigned(self, macos_version, lenient_run_command_output):
lenient_run_command_output.return_value = (
1, b'', b'test: code object is not signed at all\n')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_not_linker_signed(self, macos_version, lenient_run_command_output):
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20100 size=592 flags=0x2(adhoc) hashes=13+2 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements count=0 size=12
''')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_linker_signed_10_15(self, macos_version,
lenient_run_command_output):
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=512 flags=0x20002(adhoc,???) hashes=13+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
self.assertTrue(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_linker_signed_10_16(self, macos_version,
lenient_run_command_output):
# 10.16 is what a Python built against an SDK < 11.0 will see 11.0 as.
macos_version.return_value = [10, 16]
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=250 flags=0x20002(adhoc,linker-signed) hashes=5+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_not_called()
def test_linker_signed_11_0(self, macos_version,
lenient_run_command_output):
macos_version.return_value = [11, 0]
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=250 flags=0x20002(adhoc,linker-signed) hashes=5+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_not_called()
@mock.patch(
'signing.signing._linker_signed_arm64_needs_force', return_value=False)
@mock.patch('signing.commands.run_command')
class TestSignPart(unittest.TestCase):
def setUp(self):
self.paths = model.Paths('/$I', '/$O', '/$W')
self.config = test_config.TestConfig()
def test_sign_part(self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
'=designated => identifier "test.signing.app"', '/$W/Test.app'
])
def test_sign_part_needs_force(self, run_command,
linker_signed_arm64_needs_force):
linker_signed_arm64_needs_force.return_value = True
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--force', '--timestamp',
'--requirements', '=designated => identifier "test.signing.app"',
'/$W/Test.app'
])
def test_sign_part_no_notary(self, run_command,
linker_signed_arm64_needs_force):
config = test_config.TestConfig(notary_user=None, notary_password=None)
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--requirements',
'=designated => identifier "test.signing.app"', '/$W/Test.app'
])
def test_sign_part_no_identifier_requirement(
self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app', 'test.signing.app', identifier_requirement=False)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with(
['codesign', '--sign', '[IDENTITY]', '--timestamp', '/$W/Test.app'])
def test_sign_with_identifier(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app', 'test.signing.app', sign_with_identifier=True)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
'test.signing.app', '--requirements',
'=designated => identifier "test.signing.app"', '/$W/Test.app'
])
def test_sign_with_identifier_no_requirement(
self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
sign_with_identifier=True,
identifier_requirement=False)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
'test.signing.app', '/$W/Test.app'
])
def test_sign_part_with_options(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
options=model.CodeSignOptions.RESTRICT +
model.CodeSignOptions.LIBRARY_VALIDATION)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
'=designated => identifier "test.signing.app"', '--options',
'restrict,library', '/$W/Test.app'
])
def test_sign_part_with_entitlements(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
entitlements='entitlements.plist',
identifier_requirement=False)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--entitlements',
'/$W/entitlements.plist', '/$W/Test.app'
])
def test_verify_part(self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.verify_part(self.paths, part)
self.assertEqual(run_command.mock_calls, [
mock.call([
'codesign', '--display', '--verbose=5', '--requirements', '-',
'/$W/Test.app'
]),
mock.call(['codesign', '--verify', '--verbose=6', '/$W/Test.app']),
])
def test_verify_part_with_options(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
verify_options=model.VerifyOptions.DEEP +
model.VerifyOptions.IGNORE_RESOURCES)
signing.verify_part(self.paths, part)
self.assertEqual(run_command.mock_calls, [
mock.call([
'codesign', '--display', '--verbose=5', '--requirements', '-',
'/$W/Test.app'
]),
mock.call([
'codesign', '--verify', '--verbose=6', '--deep',
'--ignore-resources', '/$W/Test.app'
]),
])
| ric2b/Vivaldi-browser | chromium/chrome/installer/mac/signing/signing_test.py | Python | bsd-3-clause | 9,294 |
# -*- coding: utf-8 -*-
from django import VERSION as DJANGO_VERSION
from django.contrib.contenttypes.models import ContentType
from django.core.management import BaseCommand
from wagtail.wagtailcore.models import Page, Site
class Command(BaseCommand):
help = "Load Puput initial dummy data"
def handle(self, *args, **options):
# Get blogpage content type
blogpage_content_type, created = ContentType.objects.get_or_create(
model='blogpage',
app_label='puput',
defaults={'name': 'page'} if DJANGO_VERSION < (1, 8) else {}
)
# Get root page
rootpage = Page.objects.first()
# Set site root page as root site page
site = Site.objects.first()
site.root_page = rootpage
site.save()
# Create example blog page
blogpage = Page(
title="Blog",
content_type=blogpage_content_type,
slug='blog',
)
# Add blog page as a child for homepage
rootpage.add_child(instance=blogpage)
revision = blogpage.save_revision()
revision.publish()
| aaloy/puput | puput/management/commands/puput_initial_data.py | Python | mit | 1,136 |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3,), (3, 4)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlipUD(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flipud(x)
testing.assert_allclose(y.data, numpy.flipud(self.x))
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
functions.flipud, x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
gradient_check.check_double_backward(
functions.flipud, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
atol=5e-4, rtol=5e-3)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
| rezoo/chainer | tests/chainer_tests/functions_tests/array_tests/test_flipud.py | Python | mit | 1,875 |
#!/usr/bin/python
# script to find clusters of small RNA reads in the genome
# version 3 - 24-12-2013 evolution to multiprocessing
# Usage clustering.py <bowtie input> <output> <bowtie index> <clustering_distance> <minimum read number per cluster to be output> <collapse option> <extension value> <average_cluster_size>
# <folding> <output format>
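# As written below, the script actually consumes eight positional arguments:
#   sys.argv[1] bowtie input, [2] output file, [3] fasta of the bowtie index,
#   [4] clustering distance, [5] minimum reads per cluster, [6] minimum median read size,
#   [7] folding ("yes"/"no"), [8] output format ("intervals", "GFF3", anything else -> tabular report)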
import sys, subprocess, time
from collections import defaultdict # required for some SmRNAwindow attributes (readDic)
#from numpy import mean, std # required for some SmRNAwindow methods
#from scipy import stats
from smRtools import *
import multiprocessing
def clustering (Instance):
def clustermining (cluster, Instance): # cluster argument is a list
if Instance.readDict[-cluster[0]]: # test whether the first position in the cluster was reverse reads
shift = max(Instance.readDict[-cluster[0]])
upstream_coord = cluster[0] - shift + 1
else:
upstream_coord = cluster[0]
if Instance.readDict[cluster[-1]]: # test whether the last position in the cluster was forward reads
shift = max(Instance.readDict[cluster[-1]])
downstream_coord = cluster[-1] + shift -1
else:
downstream_coord = cluster[-1]
readcount = Instance.readcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
mean_size, median_size, stdv_size = Instance.statsizes(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
if readcount >= minimum_reads and median_size >= min_median_size:
location = [Instance.gene.split()[0], upstream_coord, downstream_coord]
if output_format == "intervals":
return "%s\t%s\t%s\t%s" % (location[0], location[1], location[2], readcount)
cluster_size = downstream_coord - upstream_coord + 1
if folding == "yes" and cluster_size < 151:
foldEnergy = Instance.foldEnergy(upstream_coord=upstream_coord, downstream_coord=downstream_coord) ## be careful, test !
else:
foldEnergy = "."
forwardReadcount = Instance.forwardreadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
reverseReadcount = Instance.reversereadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
density = readcount / float(cluster_size) #
if output_format == "GFF3":
if forwardReadcount >= reverseReadcount:
GFFstrand = "+"
else:
GFFstrand = "-"
Attributes = "ID=RC %s : FR %s : RR %s : Dens %s : Med %s : FE %s" % (readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy)
return "%s\tGalaxy\tRead_Cluster\t%s\t%s\t%s\t%s\t.\t%s" % (location[0], location[1], location[2], readcount, GFFstrand, Attributes)
else:
Forward_Barycenter, Reverse_Barycenter = Instance.barycenter(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
Zsignature = Instance.signature(24,29,24,29,range(1,27), zscore="yes", upstream_coord=upstream_coord, downstream_coord=downstream_coord)[10] #
Hsignature = Instance.hannon_signature(24,29,24,29, range(1,27), upstream_coord=upstream_coord, downstream_coord=downstream_coord )[10] * 100
UpiFreq = Instance.Ufreq(range(24,29), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
UsiFreq = Instance.Ufreq(range(20,22), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
return "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (location[0], location[1], location[2], cluster_size, readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy, Forward_Barycenter, Reverse_Barycenter, Zsignature, Hsignature, UpiFreq, UsiFreq)
return False
  l = Instance.readDict.keys()
  l = [abs(i) for i in l]
  l = list(set(l))
  l.sort()
upstream = 0
cluster_list = []
for i, element in enumerate (l[1:]):
if abs(element-l[i]) > dist or i+2==len(l): # the 2nd part of the logical test is to capture the last cluster if it overlaps the end of the list
cluster = l[upstream:i+1]
upstream = i+1
cluster_list.append(cluster)
result_list = []
for i in cluster_list:
totestresult = clustermining (i, Instance)
if totestresult: result_list.append(totestresult)
del Instance #
return result_list
def logtask (results):
  # pool.map_async invokes this callback once, with the list of per-instance result lists
  global number_of_clusters
  for result_list in results:
    if result_list:
      number_of_clusters += len(result_list)
      LOG.append(result_list)
  return
if __name__ == '__main__':
start_time = time.time()
fasta_dic = get_fasta (sys.argv[3])
objDic = {}
number_of_reads = 0
F = open (sys.argv[1], "r") # F is the bowtie output taken as input
for line in F:
number_of_reads += 1
fields = line.split()
polarity = fields[1]
gene = fields[2]
offset = int(fields[3])
size = len (fields[4])
try:
objDic[gene].addread (polarity, offset, size)
except KeyError:
objDic[gene] = SmRNAwindow(gene, fasta_dic[gene])
objDic[gene].addread (polarity, offset, size)
F.close()
OUT = open (sys.argv[2], "w")
output_format=sys.argv[8]
if output_format == "intervals":
print >> OUT, "#chrom\tStart\tEnd\tReadCount"
elif output_format == "GFF3":
print >> OUT, "##gff-version 3"
else:
print >> OUT, "#ID\t#chrom\tStart\tEnd\tLength\tReadCount\tForwardReads\tReverseReads\tDensity\tMedian\tFoldEnergy\tForBar\tRevBar\tz-score_signature\tHannon_signature\tUfreq_in_24-28RNAs\tUfreq_in_20-21RNs"
dist = int(sys.argv[4])
min_median_size = int(sys.argv[6])
minimum_reads = int(sys.argv[5])
number_of_clusters = 0
Instance_ID = 0
folding=sys.argv[7]
pool = multiprocessing.Pool(4)
LOG = []
instance_list = []
for instance in objDic.keys():
instance_list.append(objDic[instance])
del objDic
pool.map_async(clustering, instance_list, callback=logtask)
pool.close()
pool.join()
for lines in LOG:
for line in lines:
print >> OUT, line
OUT.close()
elapsed_time = time.time() - start_time
print "number of reads: %s\nnumber of clusters: %s\ntime: %s" % (number_of_reads, number_of_clusters, elapsed_time)
| JuPeg/tools-artbio | unstable/local_tools/clustering4.py | Python | mit | 6,082 |
# XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - key bindings (instead of quick-n-dirty bindings on Canvas):
# - up/down arrow keys to move focus around
# - ditto for page up/down, home/end
# - left/right arrows to expand/collapse & move out/in
# - more doc strings
# - add icons for "file", "module", "class", "method"; better "python" icon
# - callback for selection???
# - multiple-item selection
# - tooltips
# - redo geometry without magic numbers
# - keep track of object ids to allow more careful cleaning
# - optimize tree redraw after expand of subnode
import os
from Tkinter import *
import imp
from idlelib import ZoomHeight
from idlelib.configHandler import idleConf
ICONDIR = "Icons"
# Look for Icons subdirectory in the same directory as this module
try:
_icondir = os.path.join(os.path.dirname(__file__), ICONDIR)
except NameError:
_icondir = ICONDIR
if os.path.isdir(_icondir):
ICONDIR = _icondir
elif not os.path.isdir(ICONDIR):
raise RuntimeError, "can't find icon directory (%r)" % (ICONDIR,)
def listicons(icondir=ICONDIR):
"""Utility to display the available icons."""
root = Tk()
import glob
list = glob.glob(os.path.join(icondir, "*.gif"))
list.sort()
images = []
row = column = 0
for file in list:
name = os.path.splitext(os.path.basename(file))[0]
image = PhotoImage(file=file, master=root)
images.append(image)
label = Label(root, image=image, bd=1, relief="raised")
label.grid(row=row, column=column)
label = Label(root, text=name)
label.grid(row=row+1, column=column)
column = column + 1
if column >= 10:
row = row+2
column = 0
root.images = images
class TreeNode:
def __init__(self, canvas, parent, item):
self.canvas = canvas
self.parent = parent
self.item = item
self.state = 'collapsed'
self.selected = False
self.children = []
self.x = self.y = None
self.iconimages = {} # cache of PhotoImage instances for icons
def destroy(self):
for c in self.children[:]:
self.children.remove(c)
c.destroy()
self.parent = None
def geticonimage(self, name):
try:
return self.iconimages[name]
except KeyError:
pass
file, ext = os.path.splitext(name)
ext = ext or ".gif"
fullname = os.path.join(ICONDIR, file + ext)
image = PhotoImage(master=self.canvas, file=fullname)
self.iconimages[name] = image
return image
def select(self, event=None):
if self.selected:
return
self.deselectall()
self.selected = True
self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def deselect(self, event=None):
if not self.selected:
return
self.selected = False
self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def deselectall(self):
if self.parent:
self.parent.deselectall()
else:
self.deselecttree()
def deselecttree(self):
if self.selected:
self.deselect()
for child in self.children:
child.deselecttree()
def flip(self, event=None):
if self.state == 'expanded':
self.collapse()
else:
self.expand()
self.item.OnDoubleClick()
return "break"
def expand(self, event=None):
if not self.item._IsExpandable():
return
if self.state != 'expanded':
self.state = 'expanded'
self.update()
self.view()
def collapse(self, event=None):
if self.state != 'collapsed':
self.state = 'collapsed'
self.update()
def view(self):
top = self.y - 2
bottom = self.lastvisiblechild().y + 17
height = bottom - top
visible_top = self.canvas.canvasy(0)
visible_height = self.canvas.winfo_height()
visible_bottom = self.canvas.canvasy(visible_height)
if visible_top <= top and bottom <= visible_bottom:
return
x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
if top >= visible_top and height <= visible_height:
fraction = top + height - visible_height
else:
fraction = top
fraction = float(fraction) / y1
self.canvas.yview_moveto(fraction)
def lastvisiblechild(self):
if self.children and self.state == 'expanded':
return self.children[-1].lastvisiblechild()
else:
return self
def update(self):
if self.parent:
self.parent.update()
else:
oldcursor = self.canvas['cursor']
self.canvas['cursor'] = "watch"
self.canvas.update()
self.canvas.delete(ALL) # XXX could be more subtle
self.draw(7, 2)
x0, y0, x1, y1 = self.canvas.bbox(ALL)
self.canvas.configure(scrollregion=(0, 0, x1, y1))
self.canvas['cursor'] = oldcursor
def draw(self, x, y):
# XXX This hard-codes too many geometry constants!
self.x, self.y = x, y
self.drawicon()
self.drawtext()
if self.state != 'expanded':
return y+17
# draw children
if not self.children:
sublist = self.item._GetSubList()
if not sublist:
# _IsExpandable() was mistaken; that's allowed
return y+17
for item in sublist:
child = self.__class__(self.canvas, self, item)
self.children.append(child)
cx = x+20
cy = y+17
cylast = 0
for child in self.children:
cylast = cy
self.canvas.create_line(x+9, cy+7, cx, cy+7, fill="gray50")
cy = child.draw(cx, cy)
if child.item._IsExpandable():
if child.state == 'expanded':
iconname = "minusnode"
callback = child.collapse
else:
iconname = "plusnode"
callback = child.expand
image = self.geticonimage(iconname)
id = self.canvas.create_image(x+9, cylast+7, image=image)
# XXX This leaks bindings until canvas is deleted:
self.canvas.tag_bind(id, "<1>", callback)
self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
id = self.canvas.create_line(x+9, y+10, x+9, cylast+7,
##stipple="gray50", # XXX Seems broken in Tk 8.0.x
fill="gray50")
self.canvas.tag_lower(id) # XXX .lower(id) before Python 1.5.2
return cy
def drawicon(self):
if self.selected:
imagename = (self.item.GetSelectedIconName() or
self.item.GetIconName() or
"openfolder")
else:
imagename = self.item.GetIconName() or "folder"
image = self.geticonimage(imagename)
id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
self.image_id = id
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
def drawtext(self):
textx = self.x+20-1
texty = self.y-1
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
label = self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.GetOption('main','Theme','name')
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.text_id = id
def select_or_edit(self, event=None):
if self.selected and self.item.IsEditable():
self.edit(event)
else:
self.select(event)
def edit(self, event=None):
self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
self.entry.insert(0, self.label['text'])
self.entry.selection_range(0, END)
self.entry.pack(ipadx=5)
self.entry.focus_set()
self.entry.bind("<Return>", self.edit_finish)
self.entry.bind("<Escape>", self.edit_cancel)
def edit_finish(self, event=None):
try:
entry = self.entry
del self.entry
except AttributeError:
return
text = entry.get()
entry.destroy()
if text and text != self.item.GetText():
self.item.SetText(text)
text = self.item.GetText()
self.label['text'] = text
self.drawtext()
self.canvas.focus_set()
def edit_cancel(self, event=None):
try:
entry = self.entry
del self.entry
except AttributeError:
return
entry.destroy()
self.drawtext()
self.canvas.focus_set()
class TreeItem:
"""Abstract class representing tree items.
Methods should typically be overridden, otherwise a default action
is used.
"""
def __init__(self):
"""Constructor. Do whatever you need to do."""
def GetText(self):
"""Return text string to display."""
def GetLabelText(self):
"""Return label text string to display in front of text (if any)."""
expandable = None
def _IsExpandable(self):
"""Do not override! Called by TreeNode."""
if self.expandable is None:
self.expandable = self.IsExpandable()
return self.expandable
def IsExpandable(self):
"""Return whether there are subitems."""
return 1
def _GetSubList(self):
"""Do not override! Called by TreeNode."""
if not self.IsExpandable():
return []
sublist = self.GetSubList()
if not sublist:
self.expandable = 0
return sublist
def IsEditable(self):
"""Return whether the item's text may be edited."""
def SetText(self, text):
"""Change the item's text (if it is editable)."""
def GetIconName(self):
"""Return name of icon to be displayed normally."""
def GetSelectedIconName(self):
"""Return name of icon to be displayed when selected."""
def GetSubList(self):
"""Return list of items forming sublist."""
def OnDoubleClick(self):
"""Called on a double-click on the item."""
# Example application
class FileTreeItem(TreeItem):
"""Example TreeItem subclass -- browse the file system."""
def __init__(self, path):
self.path = path
def GetText(self):
return os.path.basename(self.path) or self.path
def IsEditable(self):
return os.path.basename(self.path) != ""
def SetText(self, text):
newpath = os.path.dirname(self.path)
newpath = os.path.join(newpath, text)
if os.path.dirname(newpath) != os.path.dirname(self.path):
return
try:
os.rename(self.path, newpath)
self.path = newpath
except os.error:
pass
def GetIconName(self):
if not self.IsExpandable():
return "python" # XXX wish there was a "file" icon
def IsExpandable(self):
return os.path.isdir(self.path)
def GetSubList(self):
try:
names = os.listdir(self.path)
except os.error:
return []
names.sort(key = os.path.normcase)
sublist = []
for name in names:
item = FileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
# A canvas widget with scroll bars and some useful bindings
class ScrolledCanvas:
def __init__(self, master, **opts):
if 'yscrollincrement' not in opts:
opts['yscrollincrement'] = 17
self.master = master
self.frame = Frame(master)
self.frame.rowconfigure(0, weight=1)
self.frame.columnconfigure(0, weight=1)
self.canvas = Canvas(self.frame, **opts)
self.canvas.grid(row=0, column=0, sticky="nsew")
self.vbar = Scrollbar(self.frame, name="vbar")
self.vbar.grid(row=0, column=1, sticky="nse")
self.hbar = Scrollbar(self.frame, name="hbar", orient="horizontal")
self.hbar.grid(row=1, column=0, sticky="ews")
self.canvas['yscrollcommand'] = self.vbar.set
self.vbar['command'] = self.canvas.yview
self.canvas['xscrollcommand'] = self.hbar.set
self.hbar['command'] = self.canvas.xview
self.canvas.bind("<Key-Prior>", self.page_up)
self.canvas.bind("<Key-Next>", self.page_down)
self.canvas.bind("<Key-Up>", self.unit_up)
self.canvas.bind("<Key-Down>", self.unit_down)
#if isinstance(master, Toplevel) or isinstance(master, Tk):
self.canvas.bind("<Alt-Key-2>", self.zoom_height)
self.canvas.focus_set()
def page_up(self, event):
self.canvas.yview_scroll(-1, "page")
return "break"
def page_down(self, event):
self.canvas.yview_scroll(1, "page")
return "break"
def unit_up(self, event):
self.canvas.yview_scroll(-1, "unit")
return "break"
def unit_down(self, event):
self.canvas.yview_scroll(1, "unit")
return "break"
def zoom_height(self, event):
ZoomHeight.zoom_height(self.master)
return "break"
# Testing functions
def test():
from idlelib import PyShell
root = Toplevel(PyShell.root)
root.configure(bd=0, bg="yellow")
root.focus_set()
sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = FileTreeItem("C:/windows/desktop")
node = TreeNode(sc.canvas, None, item)
node.expand()
def test2():
# test w/o scrolling canvas
root = Tk()
root.configure(bd=0)
canvas = Canvas(root, bg="white", highlightthickness=0)
canvas.pack(expand=1, fill="both")
item = FileTreeItem(os.curdir)
node = TreeNode(canvas, None, item)
node.update()
canvas.focus_set()
if __name__ == '__main__':
test()
| ktan2020/legacy-automation | win/Lib/idlelib/TreeWidget.py | Python | mit | 15,708 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.database import get_connection
from datetime import *
import time
from pprint import pprint
def js_time(s):
d=datetime.strptime(s,"%Y-%m-%d %H:%M:%S")
return time.mktime(d.timetuple()) * 1000
def js_date(s):
d=datetime.strptime(s,"%Y-%m-%d")
return time.mktime(d.timetuple()) * 1000
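# Illustrative values (assuming a host clock set to UTC, since mktime works in local
# time): js_date("1970-01-02") evaluates to 86400000.0, i.e. one day in milliseconds.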
class ReportIssue(Model):
_name = "report.issue"
_store = False
def get_issue_chart(self, context={}):
actions=[]
for issue in get_model("issue").search_browse([]):
if issue.date_created:
actions.append((issue.date_created,"open"))
if issue.state=="closed" and issue.date_closed:
actions.append((issue.date_closed,"close"))
actions.sort()
values=[]
num_issues=0
for d,action in actions:
if action=="open":
num_issues+=1
elif action=="close":
num_issues-=1
values.append((js_time(d), num_issues))
data = {
"value": values,
}
return data
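    # Illustrative walk-through with hypothetical events: two opens followed by one
    # close, sorted by timestamp, yield the cumulative series [(t1, 1), (t2, 2), (t3, 1)],
    # i.e. the number of issues open at each event time.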
def get_issue_close_chart(self, context={}):
closed={}
for issue in get_model("issue").search_browse([["state","=","closed"],["date_closed","!=",None]]):
d=issue.date_closed[:10]
closed.setdefault(d,0)
closed[d]+=1
values=[]
for d,n in sorted(closed.items()):
values.append((js_date(d), n))
data = {
"value": [{
"key": "Closed",
"values": values,
}]
}
pprint(data)
return data
ReportIssue.register()
| sidzan/netforce | netforce_support/netforce_support/models/report_issue.py | Python | mit | 2,805 |
import unittest
import os
import os.path
import contextlib
import sys
import test._mock_backport as mock
import test.test_support
import ensurepip
import ensurepip._uninstall
class TestEnsurePipVersion(unittest.TestCase):
def test_returns_version(self):
self.assertEqual(ensurepip._PIP_VERSION, ensurepip.version())
class EnsurepipMixin:
def setUp(self):
run_pip_patch = mock.patch("ensurepip._run_pip")
self.run_pip = run_pip_patch.start()
self.addCleanup(run_pip_patch.stop)
# Avoid side effects on the actual os module
real_devnull = os.devnull
os_patch = mock.patch("ensurepip.os")
patched_os = os_patch.start()
self.addCleanup(os_patch.stop)
patched_os.devnull = real_devnull
patched_os.path = os.path
self.os_environ = patched_os.environ = os.environ.copy()
class TestBootstrap(EnsurepipMixin, unittest.TestCase):
def test_basic_bootstrapping(self):
ensurepip.bootstrap()
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "setuptools", "pip",
],
mock.ANY,
)
additional_paths = self.run_pip.call_args[0][1]
self.assertEqual(len(additional_paths), 2)
def test_bootstrapping_with_root(self):
ensurepip.bootstrap(root="/foo/bar/")
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--root", "/foo/bar/",
"setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_user(self):
ensurepip.bootstrap(user=True)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--user", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_upgrade(self):
ensurepip.bootstrap(upgrade=True)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "--upgrade", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_1(self):
ensurepip.bootstrap(verbosity=1)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-v", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_2(self):
ensurepip.bootstrap(verbosity=2)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-vv", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_verbosity_3(self):
ensurepip.bootstrap(verbosity=3)
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "-vvv", "setuptools", "pip",
],
mock.ANY,
)
def test_bootstrapping_with_regular_install(self):
ensurepip.bootstrap()
self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "install")
def test_bootstrapping_with_alt_install(self):
ensurepip.bootstrap(altinstall=True)
self.assertEqual(self.os_environ["ENSUREPIP_OPTIONS"], "altinstall")
def test_bootstrapping_with_default_pip(self):
ensurepip.bootstrap(default_pip=True)
self.assertNotIn("ENSUREPIP_OPTIONS", self.os_environ)
def test_altinstall_default_pip_conflict(self):
with self.assertRaises(ValueError):
ensurepip.bootstrap(altinstall=True, default_pip=True)
self.assertFalse(self.run_pip.called)
def test_pip_environment_variables_removed(self):
# ensurepip deliberately ignores all pip environment variables
# See http://bugs.python.org/issue19734 for details
self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder"
ensurepip.bootstrap()
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ)
def test_pip_config_file_disabled(self):
# ensurepip deliberately ignores the pip config file
# See http://bugs.python.org/issue20053 for details
ensurepip.bootstrap()
self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull)
@contextlib.contextmanager
def fake_pip(version=ensurepip._PIP_VERSION):
if version is None:
pip = None
else:
class FakePip():
__version__ = version
pip = FakePip()
sentinel = object()
orig_pip = sys.modules.get("pip", sentinel)
sys.modules["pip"] = pip
try:
yield pip
finally:
if orig_pip is sentinel:
del sys.modules["pip"]
else:
sys.modules["pip"] = orig_pip
class TestUninstall(EnsurepipMixin, unittest.TestCase):
def test_uninstall_skipped_when_not_installed(self):
with fake_pip(None):
ensurepip._uninstall_helper()
self.assertFalse(self.run_pip.called)
def test_uninstall_skipped_with_warning_for_wrong_version(self):
with fake_pip("not a valid version"):
with test.test_support.captured_stderr() as stderr:
ensurepip._uninstall_helper()
warning = stderr.getvalue().strip()
self.assertIn("only uninstall a matching version", warning)
self.assertFalse(self.run_pip.called)
def test_uninstall(self):
with fake_pip():
ensurepip._uninstall_helper()
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_1(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=1)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-v", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_2(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=2)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-vv", "pip",
"setuptools",
]
)
def test_uninstall_with_verbosity_3(self):
with fake_pip():
ensurepip._uninstall_helper(verbosity=3)
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "-vvv",
"pip", "setuptools",
]
)
def test_pip_environment_variables_removed(self):
# ensurepip deliberately ignores all pip environment variables
# See http://bugs.python.org/issue19734 for details
self.os_environ["PIP_THIS_SHOULD_GO_AWAY"] = "test fodder"
with fake_pip():
ensurepip._uninstall_helper()
self.assertNotIn("PIP_THIS_SHOULD_GO_AWAY", self.os_environ)
def test_pip_config_file_disabled(self):
# ensurepip deliberately ignores the pip config file
# See http://bugs.python.org/issue20053 for details
with fake_pip():
ensurepip._uninstall_helper()
self.assertEqual(self.os_environ["PIP_CONFIG_FILE"], os.devnull)
# Basic testing of the main functions and their argument parsing
EXPECTED_VERSION_OUTPUT = "pip " + ensurepip._PIP_VERSION
class TestBootstrappingMainFunction(EnsurepipMixin, unittest.TestCase):
def test_bootstrap_version(self):
with test.test_support.captured_stderr() as stderr:
with self.assertRaises(SystemExit):
ensurepip._main(["--version"])
result = stderr.getvalue().strip()
self.assertEqual(result, EXPECTED_VERSION_OUTPUT)
self.assertFalse(self.run_pip.called)
def test_basic_bootstrapping(self):
ensurepip._main([])
self.run_pip.assert_called_once_with(
[
"install", "--no-index", "--find-links",
mock.ANY, "setuptools", "pip",
],
mock.ANY,
)
additional_paths = self.run_pip.call_args[0][1]
self.assertEqual(len(additional_paths), 2)
class TestUninstallationMainFunction(EnsurepipMixin, unittest.TestCase):
def test_uninstall_version(self):
with test.test_support.captured_stderr() as stderr:
with self.assertRaises(SystemExit):
ensurepip._uninstall._main(["--version"])
result = stderr.getvalue().strip()
self.assertEqual(result, EXPECTED_VERSION_OUTPUT)
self.assertFalse(self.run_pip.called)
def test_basic_uninstall(self):
with fake_pip():
ensurepip._uninstall._main([])
self.run_pip.assert_called_once_with(
[
"uninstall", "-y", "--disable-pip-version-check", "pip",
"setuptools",
]
)
if __name__ == "__main__":
test.test_support.run_unittest(__name__)
| sometallgit/AutoUploader | Python27/Lib/test/test_ensurepip.py | Python | mit | 9,313 |
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resouece data folder
#ExStart:1
#Instantiate Aspose.Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose.Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode.
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "jpeg"
try:
#invoke Aspose.BarCode Cloud SDK API to create barcode and put in cloud storage
response = barcodeApi.PutBarcodeGenerateFile(name, file=None, text=text, type=type, format=format)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1 | farooqsheikhpk/Aspose.BarCode-for-Cloud | Examples/Python/generating-saving/cloud-storage/generate-barcode-and-save-asposecloudstorage.py | Python | mit | 1,910 |
#!/usr/bin/env python
"""
Simple desktop dialogue box support for Python.
Copyright (C) 2007, 2009 Paul Boddie <paul@boddie.org.uk>
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
--------
Opening Dialogue Boxes (Dialogs)
--------------------------------
To open a dialogue box (dialog) in the current desktop environment, relying on
the automatic detection of that environment, use the appropriate dialogue box
class:
question = desktop.dialog.Question("Are you sure?")
result = question.open()
To override the detected desktop, specify the desktop parameter to the open
function as follows:
question.open("KDE") # Insists on KDE
question.open("GNOME") # Insists on GNOME
The dialogue box options are documented in each class's docstring.
Available dialogue box classes are listed in the desktop.dialog.available
attribute.
Supported desktop environments are listed in the desktop.dialog.supported
attribute.
"""
from desktop import use_desktop, _run, _readfrom, _status
class _wrapper:
def __init__(self, handler):
self.handler = handler
class _readvalue(_wrapper):
def __call__(self, cmd, shell):
return self.handler(cmd, shell).strip()
class _readinput(_wrapper):
def __call__(self, cmd, shell):
return self.handler(cmd, shell)[:-1]
class _readvalues_kdialog(_wrapper):
def __call__(self, cmd, shell):
result = self.handler(cmd, shell).strip().strip('"')
if result:
return result.split('" "')
else:
return []
class _readvalues_zenity(_wrapper):
def __call__(self, cmd, shell):
result = self.handler(cmd, shell).strip()
if result:
return result.split("|")
else:
return []
class _readvalues_Xdialog(_wrapper):
def __call__(self, cmd, shell):
result = self.handler(cmd, shell).strip()
if result:
return result.split("/")
else:
return []
# Dialogue parameter classes.
class String:
"A generic parameter."
def __init__(self, name):
self.name = name
def convert(self, value, program):
return [value or ""]
class Strings(String):
"Multiple string parameters."
def convert(self, value, program):
return value or []
class StringPairs(String):
"Multiple string parameters duplicated to make identifiers."
def convert(self, value, program):
l = []
for v in value:
l.append(v)
l.append(v)
return l
class StringKeyword:
"A keyword parameter."
def __init__(self, keyword, name):
self.keyword = keyword
self.name = name
def convert(self, value, program):
return [self.keyword + "=" + (value or "")]
class StringKeywords:
"Multiple keyword parameters."
def __init__(self, keyword, name):
self.keyword = keyword
self.name = name
def convert(self, value, program):
l = []
for v in value or []:
l.append(self.keyword + "=" + v)
return l
class Integer(String):
"An integer parameter."
defaults = {
"width" : 40,
"height" : 15,
"list_height" : 10
}
scale = 8
def __init__(self, name, pixels=0):
String.__init__(self, name)
if pixels:
self.factor = self.scale
else:
self.factor = 1
def convert(self, value, program):
if value is None:
value = self.defaults[self.name]
return [str(int(value) * self.factor)]
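# Illustrative conversion, derived from the defaults above:
#   Integer("width", pixels=1).convert(None, "kdialog") evaluates to ["320"] (40 chars * 8 px)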
class IntegerKeyword(Integer):
"An integer keyword parameter."
def __init__(self, keyword, name, pixels=0):
Integer.__init__(self, name, pixels)
self.keyword = keyword
def convert(self, value, program):
if value is None:
value = self.defaults[self.name]
return [self.keyword + "=" + str(int(value) * self.factor)]
class Boolean(String):
"A boolean parameter."
values = {
"kdialog" : ["off", "on"],
"zenity" : ["FALSE", "TRUE"],
"Xdialog" : ["off", "on"]
}
def convert(self, value, program):
values = self.values[program]
if value:
return [values[1]]
else:
return [values[0]]
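# e.g. Boolean("status").convert(True, "zenity") yields ["TRUE"], while a false value
# under kdialog or Xdialog yields ["off"].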
class MenuItemList(String):
"A menu item list parameter."
def convert(self, value, program):
l = []
for v in value:
l.append(v.value)
l.append(v.text)
return l
class ListItemList(String):
"A radiolist/checklist item list parameter."
def __init__(self, name, status_first=0):
String.__init__(self, name)
self.status_first = status_first
def convert(self, value, program):
l = []
for v in value:
boolean = Boolean(None)
status = boolean.convert(v.status, program)
if self.status_first:
l += status
l.append(v.value)
l.append(v.text)
if not self.status_first:
l += status
return l
# Dialogue argument values.
class MenuItem:
"A menu item which can also be used with radiolists and checklists."
def __init__(self, value, text, status=0):
self.value = value
self.text = text
self.status = status
# Dialogue classes.
class Dialogue:
commands = {
"KDE" : "kdialog",
"GNOME" : "zenity",
"XFCE" : "zenity", # NOTE: Based on observations with Xubuntu.
"X11" : "Xdialog"
}
def open(self, desktop=None):
"""
Open a dialogue box (dialog) using a program appropriate to the desktop
environment in use.
If the optional 'desktop' parameter is specified then attempt to use
that particular desktop environment's mechanisms to open the dialog
instead of guessing or detecting which environment is being used.
Suggested values for 'desktop' are "standard", "KDE", "GNOME",
"Mac OS X", "Windows".
The result of the dialogue interaction may be a string indicating user
input (for Input, Password, Menu, Pulldown), a list of strings
indicating selections of one or more items (for RadioList, CheckList),
or a value indicating true or false (for Question, Warning, Message,
Error).
Where a string value may be expected but no choice is made, an empty
string may be returned. Similarly, where a list of values is expected
but no choice is made, an empty list may be returned.
"""
# Decide on the desktop environment in use.
desktop_in_use = use_desktop(desktop)
# Get the program.
try:
program = self.commands[desktop_in_use]
except KeyError:
raise OSError("Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use)
# The handler is one of the functions communicating with the subprocess.
# Some handlers return boolean values, others strings.
handler, options = self.info[program]
cmd = [program]
for option in options:
if isinstance(option, str):
cmd.append(option)
else:
value = getattr(self, option.name, None)
cmd += option.convert(value, program)
return handler(cmd, 0)
class Simple(Dialogue):
def __init__(self, text, width=None, height=None):
self.text = text
self.width = width
self.height = height
class Question(Simple):
"""
A dialogue asking a question and showing response buttons.
Options: text, width (in characters), height (in characters)
Response: a boolean value indicating an affirmative response (true) or a
negative response
"""
name = "question"
info = {
"kdialog" : (_status, ["--yesno", String("text")]),
"zenity" : (_status, ["--question", StringKeyword("--text", "text")]),
"Xdialog" : (_status, ["--stdout", "--yesno", String("text"), Integer("height"), Integer("width")]),
}
class Warning(Simple):
"""
    A dialogue presenting a warning as a question and showing response buttons.
Options: text, width (in characters), height (in characters)
Response: a boolean value indicating an affirmative response (true) or a
negative response
"""
name = "warning"
info = {
"kdialog" : (_status, ["--warningyesno", String("text")]),
"zenity" : (_status, ["--warning", StringKeyword("--text", "text")]),
"Xdialog" : (_status, ["--stdout", "--yesno", String("text"), Integer("height"), Integer("width")]),
}
class Message(Simple):
"""
A message dialogue.
Options: text, width (in characters), height (in characters)
Response: a boolean value indicating an affirmative response (true) or a
negative response
"""
name = "message"
info = {
"kdialog" : (_status, ["--msgbox", String("text")]),
"zenity" : (_status, ["--info", StringKeyword("--text", "text")]),
"Xdialog" : (_status, ["--stdout", "--msgbox", String("text"), Integer("height"), Integer("width")]),
}
class Error(Simple):
"""
An error dialogue.
Options: text, width (in characters), height (in characters)
Response: a boolean value indicating an affirmative response (true) or a
negative response
"""
name = "error"
info = {
"kdialog" : (_status, ["--error", String("text")]),
"zenity" : (_status, ["--error", StringKeyword("--text", "text")]),
"Xdialog" : (_status, ["--stdout", "--msgbox", String("text"), Integer("height"), Integer("width")]),
}
class Menu(Simple):
"""
A menu of options, one of which being selectable.
Options: text, width (in characters), height (in characters),
list_height (in items), items (MenuItem objects)
Response: a value corresponding to the chosen item
"""
name = "menu"
info = {
"kdialog" : (_readvalue(_readfrom), ["--menu", String("text"), MenuItemList("items")]),
"zenity" : (_readvalue(_readfrom), ["--list", StringKeyword("--text", "text"), StringKeywords("--column", "titles"),
MenuItemList("items")]
),
"Xdialog" : (_readvalue(_readfrom), ["--stdout", "--menubox",
String("text"), Integer("height"), Integer("width"), Integer("list_height"), MenuItemList("items")]
),
}
item = MenuItem
number_of_titles = 2
def __init__(self, text, titles, items=None, width=None, height=None, list_height=None):
"""
Initialise a menu with the given heading 'text', column 'titles', and
optional 'items' (which may be added later), 'width' (in characters),
'height' (in characters) and 'list_height' (in items).
"""
Simple.__init__(self, text, width, height)
self.titles = ([""] * self.number_of_titles + titles)[-self.number_of_titles:]
self.items = items or []
self.list_height = list_height
def add(self, *args, **kw):
"""
Add an item, passing the given arguments to the appropriate item class.
"""
self.items.append(self.item(*args, **kw))
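# Usage sketch (illustrative, consistent with the docstring above):
#   menu = Menu("Pick a fruit", ["Value", "Fruit"])
#   menu.add("a", "Apple")
#   menu.add("b", "Banana")
#   chosen = menu.open()  # e.g. "a"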
class RadioList(Menu):
"""
A list of radio buttons, one of which being selectable.
Options: text, width (in characters), height (in characters),
list_height (in items), items (MenuItem objects), titles
Response: a list of values corresponding to chosen items (since some
programs, eg. zenity, appear to support multiple default
selections)
"""
name = "radiolist"
info = {
"kdialog" : (_readvalues_kdialog(_readfrom), ["--radiolist", String("text"), ListItemList("items")]),
"zenity" : (_readvalues_zenity(_readfrom),
["--list", "--radiolist", StringKeyword("--text", "text"), StringKeywords("--column", "titles"),
ListItemList("items", 1)]
),
"Xdialog" : (_readvalues_Xdialog(_readfrom), ["--stdout", "--radiolist",
String("text"), Integer("height"), Integer("width"), Integer("list_height"), ListItemList("items")]
),
}
number_of_titles = 3
class CheckList(Menu):
"""
A list of checkboxes, many being selectable.
Options: text, width (in characters), height (in characters),
list_height (in items), items (MenuItem objects), titles
Response: a list of values corresponding to chosen items
"""
name = "checklist"
info = {
"kdialog" : (_readvalues_kdialog(_readfrom), ["--checklist", String("text"), ListItemList("items")]),
"zenity" : (_readvalues_zenity(_readfrom),
["--list", "--checklist", StringKeyword("--text", "text"), StringKeywords("--column", "titles"),
ListItemList("items", 1)]
),
"Xdialog" : (_readvalues_Xdialog(_readfrom), ["--stdout", "--checklist",
String("text"), Integer("height"), Integer("width"), Integer("list_height"), ListItemList("items")]
),
}
number_of_titles = 3
class Pulldown(Menu):
"""
A pull-down menu of options, one of which being selectable.
Options: text, width (in characters), height (in characters),
items (list of values)
Response: a value corresponding to the chosen item
"""
name = "pulldown"
info = {
"kdialog" : (_readvalue(_readfrom), ["--combobox", String("text"), Strings("items")]),
"zenity" : (_readvalue(_readfrom),
["--list", "--radiolist", StringKeyword("--text", "text"), StringKeywords("--column", "titles"),
StringPairs("items")]
),
"Xdialog" : (_readvalue(_readfrom),
["--stdout", "--combobox", String("text"), Integer("height"), Integer("width"), Strings("items")]),
}
item = str
number_of_titles = 2
class Input(Simple):
"""
An input dialogue, consisting of an input field.
Options: text, input, width (in characters), height (in characters)
Response: the text entered into the dialogue by the user
"""
name = "input"
info = {
"kdialog" : (_readinput(_readfrom),
["--inputbox", String("text"), String("data")]),
"zenity" : (_readinput(_readfrom),
["--entry", StringKeyword("--text", "text"), StringKeyword("--entry-text", "data")]),
"Xdialog" : (_readinput(_readfrom),
["--stdout", "--inputbox", String("text"), Integer("height"), Integer("width"), String("data")]),
}
def __init__(self, text, data="", width=None, height=None):
Simple.__init__(self, text, width, height)
self.data = data
class Password(Input):
"""
A password dialogue, consisting of a password entry field.
Options: text, width (in characters), height (in characters)
Response: the text entered into the dialogue by the user
"""
name = "password"
info = {
"kdialog" : (_readinput(_readfrom),
["--password", String("text")]),
"zenity" : (_readinput(_readfrom),
["--entry", StringKeyword("--text", "text"), "--hide-text"]),
"Xdialog" : (_readinput(_readfrom),
["--stdout", "--password", "--inputbox", String("text"), Integer("height"), Integer("width")]),
}
class TextFile(Simple):
"""
A text file input box.
Options: filename, text, width (in characters), height (in characters)
Response: any text returned by the dialogue program (typically an empty
string)
"""
name = "textfile"
info = {
"kdialog" : (_readfrom, ["--textbox", String("filename"), Integer("width", pixels=1), Integer("height", pixels=1)]),
"zenity" : (_readfrom, ["--text-info", StringKeyword("--filename", "filename"), IntegerKeyword("--width", "width", pixels=1),
IntegerKeyword("--height", "height", pixels=1)]
),
"Xdialog" : (_readfrom, ["--stdout", "--textbox", String("filename"), Integer("height"), Integer("width")]),
}
def __init__(self, filename, text="", width=None, height=None):
Simple.__init__(self, text, width, height)
self.filename = filename
# Available dialogues.
available = [Question, Warning, Message, Error, Menu, CheckList, RadioList, Input, Password, Pulldown, TextFile]
# Supported desktop environments.
supported = list(Dialogue.commands.keys())
# vim: tabstop=4 expandtab shiftwidth=4
| constantx/dotfiles | sublime3/Packages/SideBarEnhancements-st3/desktop/dialog.py | Python | mit | 17,225 |
#!/usr/bin/env python
'''
Developer script to convert yaml periodic table to json format.
Created on Nov 15, 2011
'''
from __future__ import division
import json
import ruamel.yaml as yaml
import re
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 15, 2011"
def test_yaml():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
print(data)
def test_json():
with open('periodic_table.json', 'r') as f:
data = json.load(f)
print(data)
def parse_oxi_state():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
f = open('oxidation_states.txt', 'r')
oxidata = f.read()
f.close()
oxidata = re.sub('[\n\r]', '', oxidata)
patt = re.compile('<tr>(.*?)</tr>', re.MULTILINE)
for m in patt.finditer(oxidata):
line = m.group(1)
line = re.sub('</td>', '', line)
line = re.sub('(<td>)+', '<td>', line)
line = re.sub('</*a[^>]*>', '', line)
el = None
oxistates = []
common_oxi = []
for tok in re.split('<td>', line.strip()):
m2 = re.match("<b>([A-Z][a-z]*)</b>", tok)
if m2:
el = m2.group(1)
else:
m3 = re.match("(<b>)*([\+\-]\d)(</b>)*", tok)
if m3:
oxistates.append(int(m3.group(2)))
if m3.group(1):
common_oxi.append(int(m3.group(2)))
if el in data:
del data[el]['Max oxidation state']
del data[el]['Min oxidation state']
del data[el]['Oxidation_states']
del data[el]['Common_oxidation_states']
data[el]['Oxidation states'] = oxistates
data[el]['Common oxidation states'] = common_oxi
else:
print(el)
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
def parse_ionic_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
f = open('ionic_radii.csv', 'r')
radiidata = f.read()
f.close()
radiidata = radiidata.split("\r")
header = radiidata[0].split(",")
for i in range(1, len(radiidata)):
line = radiidata[i]
toks = line.strip().split(",")
suffix = ""
name = toks[1]
if len(name.split(" ")) > 1:
suffix = "_" + name.split(" ")[1]
el = toks[2]
ionic_radii = {}
for j in range(3, len(toks)):
m = re.match("^\s*([0-9\.]+)", toks[j])
if m:
ionic_radii[int(header[j])] = float(m.group(1))
if el in data:
data[el]['Ionic_radii' + suffix] = ionic_radii
if suffix == '_hs':
data[el]['Ionic_radii'] = ionic_radii
else:
print(el)
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
def parse_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
f = open('radii.csv', 'r')
radiidata = f.read()
f.close()
radiidata = radiidata.split("\r")
header = radiidata[0].split(",")
for i in range(1, len(radiidata)):
line = radiidata[i]
toks = line.strip().split(",")
el = toks[1]
        try:
            atomic_radii = float(toks[3]) / 100
        except ValueError:
            atomic_radii = toks[3]
        try:
            atomic_radii_calc = float(toks[4]) / 100
        except ValueError:
            atomic_radii_calc = toks[4]
        try:
            vdw_radii = float(toks[5]) / 100
        except ValueError:
            vdw_radii = toks[5]
if el in data:
data[el]['Atomic radius'] = atomic_radii
data[el]['Atomic radius calculated'] = atomic_radii_calc
data[el]['Van der waals radius'] = vdw_radii
else:
print(el)
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def update_ionic_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
for el, d in data.items():
if "Ionic_radii" in d:
d["Ionic radii"] = {k: v / 100
for k, v in d["Ionic_radii"].items()}
del d["Ionic_radii"]
if "Ionic_radii_hs" in d:
d["Ionic radii hs"] = {k: v / 100
for k, v in d["Ionic_radii_hs"].items()}
del d["Ionic_radii_hs"]
if "Ionic_radii_ls" in d:
d["Ionic radii ls"] = {k: v / 100
for k, v in d["Ionic_radii_ls"].items()}
del d["Ionic_radii_ls"]
with open('periodic_table2.yaml', 'w') as f:
yaml.dump(data, f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def parse_shannon_radii():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
from openpyxl import load_workbook
import collections
wb = load_workbook('Shannon Radii.xlsx')
print(wb.get_sheet_names())
sheet = wb["Sheet1"]
i = 2
radii = collections.defaultdict(dict)
while sheet["E%d" % i].value:
if sheet["A%d" % i].value:
el = sheet["A%d" % i].value
if sheet["B%d" % i].value:
charge = int(sheet["B%d" % i].value)
radii[el][charge] = dict()
if sheet["C%d" % i].value:
cn = sheet["C%d" % i].value
if cn not in radii[el][charge]:
radii[el][charge][cn] = dict()
if sheet["D%d" % i].value is not None:
spin = sheet["D%d" % i].value
else:
spin = ""
# print("%s - %d - %s" % (el, charge, cn))
radii[el][charge][cn][spin] = {
"crystal_radius": float(sheet["E%d" % i].value),
"ionic_radius": float(sheet["F%d" % i].value),
}
i += 1
for el in radii.keys():
if el in data:
data[el]["Shannon radii"] = dict(radii[el])
with open('periodic_table.yaml', 'w') as f:
yaml.safe_dump(data, f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
def gen_periodic_table():
with open('periodic_table.yaml', 'r') as f:
data = yaml.load(f)
with open('periodic_table.json', 'w') as f:
json.dump(data, f)
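def _example_roundtrip_check():
    """Illustrative sketch, not part of the original script: verify that the
    YAML source and the JSON produced by gen_periodic_table() agree. Assumes
    both files exist in the working directory."""
    with open('periodic_table.yaml', 'r') as f:
        yaml_data = yaml.load(f)
    with open('periodic_table.json', 'r') as f:
        json_data = json.load(f)
    # Compare key sets; json.load may return unicode keys on Python 2.
    assert set(yaml_data.keys()) == set(json_data.keys())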
if __name__ == "__main__":
parse_shannon_radii()
#gen_periodic_table()
| czhengsci/pymatgen | dev_scripts/update_pt_data.py | Python | mit | 6,563 |
#!/usr/bin/env python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class libsndfile(test.test):
"""
Autotest module for testing basic functionality
of libsndfile
    @author Anitha MallojiRao amalloji@in.ibm.com
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
        logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./libsndfile.sh'], cwd="%s/libsndfile" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| PoornimaNayak/autotest-client-tests | linux-tools/libsndfile/libsndfile.py | Python | gpl-2.0 | 1,253 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Sends notification to search-server that it should update server index
#
import xmlrpclib
from spacewalk.common.rhnLog import log_error
class SearchNotify:
def __init__(self, host="127.0.0.1", port="2828"):
self.addr = "http://%s:%s" % (host, port)
def notify(self, indexName="server"):
try:
client = xmlrpclib.ServerProxy(self.addr)
result = client.admin.updateIndex(indexName)
except Exception, e:
log_error("Failed to notify search service located at %s to update %s indexes"
% (self.addr, indexName), e)
return False
return result
if __name__ == "__main__":
search = SearchNotify()
result = search.notify()
print "search.notify() = %s" % (result)
| xkollar/spacewalk | backend/server/rhnServer/search_notify.py | Python | gpl-2.0 | 1,394 |
# Copyright (C) 2011 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
from gi.repository import GConf
from gi.repository import Gst
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
DEFAULT_PITCH = 0
DEFAULT_RATE = 0
_speech_manager = None
class SpeechManager(GObject.GObject):
__gtype_name__ = 'SpeechManager'
__gsignals__ = {
'play': (GObject.SignalFlags.RUN_FIRST, None, []),
'pause': (GObject.SignalFlags.RUN_FIRST, None, []),
'stop': (GObject.SignalFlags.RUN_FIRST, None, [])
}
MIN_PITCH = -100
MAX_PITCH = 100
MIN_RATE = -100
MAX_RATE = 100
def __init__(self, **kwargs):
GObject.GObject.__init__(self, **kwargs)
self._player = _GstSpeechPlayer()
self._player.connect('play', self._update_state, 'play')
self._player.connect('stop', self._update_state, 'stop')
self._player.connect('pause', self._update_state, 'pause')
self._voice_name = self._player.get_default_voice()
self._pitch = DEFAULT_PITCH
self._rate = DEFAULT_RATE
self._is_playing = False
self._is_paused = False
self.restore()
def _update_state(self, player, signal):
self._is_playing = (signal == 'play')
self._is_paused = (signal == 'pause')
self.emit(signal)
def get_is_playing(self):
return self._is_playing
is_playing = GObject.property(type=bool, getter=get_is_playing,
setter=None, default=False)
def get_is_paused(self):
return self._is_paused
is_paused = GObject.property(type=bool, getter=get_is_paused,
setter=None, default=False)
def get_pitch(self):
return self._pitch
def get_rate(self):
return self._rate
def set_pitch(self, pitch):
self._pitch = pitch
self.save()
def set_rate(self, rate):
self._rate = rate
self.save()
def say_text(self, text):
if text:
self._player.speak(self._pitch, self._rate, self._voice_name, text)
def say_selected_text(self):
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
clipboard.request_text(self.__primary_selection_cb, None)
def pause(self):
self._player.pause_sound_device()
def restart(self):
self._player.restart_sound_device()
def stop(self):
self._player.stop_sound_device()
def __primary_selection_cb(self, clipboard, text, user_data):
self.say_text(text)
def save(self):
client = GConf.Client.get_default()
client.set_int('/desktop/sugar/speech/pitch', self._pitch)
client.set_int('/desktop/sugar/speech/rate', self._rate)
logging.debug('saving speech configuration pitch %s rate %s',
self._pitch, self._rate)
def restore(self):
client = GConf.Client.get_default()
self._pitch = client.get_int('/desktop/sugar/speech/pitch')
self._rate = client.get_int('/desktop/sugar/speech/rate')
logging.debug('loading speech configuration pitch %s rate %s',
self._pitch, self._rate)
class _GstSpeechPlayer(GObject.GObject):
__gsignals__ = {
'play': (GObject.SignalFlags.RUN_FIRST, None, []),
'pause': (GObject.SignalFlags.RUN_FIRST, None, []),
'stop': (GObject.SignalFlags.RUN_FIRST, None, [])
}
def __init__(self):
GObject.GObject.__init__(self)
self._pipeline = None
def restart_sound_device(self):
if self._pipeline is None:
logging.debug('Trying to restart not initialized sound device')
return
self._pipeline.set_state(Gst.State.PLAYING)
self.emit('play')
def pause_sound_device(self):
if self._pipeline is None:
return
self._pipeline.set_state(Gst.State.PAUSED)
self.emit('pause')
def stop_sound_device(self):
if self._pipeline is None:
return
self._pipeline.set_state(Gst.State.NULL)
self.emit('stop')
def make_pipeline(self, command):
if self._pipeline is not None:
self.stop_sound_device()
del self._pipeline
self._pipeline = Gst.parse_launch(command)
bus = self._pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self.__pipe_message_cb)
def __pipe_message_cb(self, bus, message):
if message.type == Gst.MessageType.EOS:
self._pipeline.set_state(Gst.State.NULL)
self.emit('stop')
elif message.type == Gst.MessageType.ERROR:
self._pipeline.set_state(Gst.State.NULL)
self.emit('stop')
def speak(self, pitch, rate, voice_name, text):
# TODO workaround for http://bugs.sugarlabs.org/ticket/1801
if not [i for i in text if i.isalnum()]:
return
self.make_pipeline('espeak name=espeak ! autoaudiosink')
src = self._pipeline.get_by_name('espeak')
src.props.text = text
src.props.pitch = pitch
src.props.rate = rate
src.props.voice = voice_name
src.props.track = 2 # track for marks
self.restart_sound_device()
def get_all_voices(self):
all_voices = {}
for voice in Gst.ElementFactory.make('espeak', None).props.voices:
name, language, dialect = voice
if dialect != 'none':
all_voices[language + '_' + dialect] = name
else:
all_voices[language] = name
return all_voices
def get_default_voice(self):
"""Try to figure out the default voice, from the current locale ($LANG)
Fall back to espeak's voice called Default."""
voices = self.get_all_voices()
locale = os.environ.get('LANG', '')
language_location = locale.split('.', 1)[0].lower()
language = language_location.split('_')[0]
# if the language is es but not es_es default to es_la (latin voice)
if language == 'es' and language_location != 'es_es':
language_location = 'es_la'
best = voices.get(language_location) or voices.get(language) \
or 'default'
logging.debug('Best voice for LANG %s seems to be %s',
locale, best)
return best
def get_speech_manager():
global _speech_manager
if _speech_manager is None:
_speech_manager = SpeechManager()
return _speech_manager
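# Illustrative sketch, not part of the original module: other Sugar components
# are expected to obtain the manager through the singleton accessor above.
# Assumes a GTK/GStreamer main loop is running so the espeak pipeline can play.
def _example_speak():
    manager = get_speech_manager()
    manager.set_rate(DEFAULT_RATE)
    manager.say_text('hello from the speech manager')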
| gusDuarte/sugar | src/jarabe/model/speech.py | Python | gpl-2.0 | 7,214 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import os
from PyQt4.QtGui import QSplashScreen, QPixmap
from PyQt4.QtCore import Qt
import ilastik
splashScreen = None
def showSplashScreen():
splash_path = os.path.join(os.path.split(ilastik.__file__)[0], 'ilastik-splash.png')
splashImage = QPixmap(splash_path)
global splashScreen
splashScreen = QSplashScreen(splashImage)
splashScreen.showMessage( ilastik.__version__, Qt.AlignBottom | Qt.AlignRight )
splashScreen.show()
def hideSplashScreen():
import startShellGui
global splashScreen
splashScreen.finish(startShellGui.shell)
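# Illustrative sketch, not part of the original module: the intended call
# order is show first, then hide once the shell window exists.
# hideSplashScreen() imports startShellGui lazily, so it assumes the shell
# has already been created by the time it runs.
def _example_splash_lifecycle():
    showSplashScreen()
    # ... start the ilastik shell here ...
    hideSplashScreen()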
| nielsbuwen/ilastik | ilastik/shell/gui/splashScreen.py | Python | gpl-3.0 | 1,515 |
from plugins.qlprofile.qlprofile import get_profile
| MinoMino/minqlbot-plugins | plugins/qlprofile/__init__.py | Python | gpl-3.0 | 52 |
# -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use by internal Twisted code. We encourage you to use the
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def currentframe(n=0):
"""
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@type n: L{int}
@return: a frame, n levels up the stack from the caller.
@rtype: L{types.FrameType}
"""
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        # Report every offending character, not just the last one examined.
        bad = [x for x in addr if x not in string.hexdigits + ':.']
        if bad:
            raise ValueError("Illegal characters: %r" % (''.join(bad),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
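# Illustrative sketch, not part of the original module: a minimal class that
# relies on @comparable to derive the rich comparison methods from __cmp__ on
# Python 3 (on Python 2 the decorator is a no-op and __cmp__ works natively).
@comparable
class _ExampleVersionTag(object):
    def __init__(self, number):
        self.number = number
    def __cmp__(self, other):
        if not isinstance(other, _ExampleVersionTag):
            return NotImplemented
        return cmp(self.number, other.number)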
if _PY3:
unicode = str
long = int
else:
unicode = unicode
long = long
def ioType(fileIshObject, default=unicode):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{unicode}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. L{basestring}, if we are on python 2 (the L{basestring} type
does not exist on python 3) and the file is opened in binary
mode, but has an encoding and can therefore accept both bytes
and text reliably for writing, but will return L{bytes} from
read methods.
4. The C{default} parameter, if the given type is not understood.
@rtype: L{type}
"""
if isinstance(fileIshObject, TextIOBase):
# If it's for text I/O, then it's for text I/O.
return unicode
if isinstance(fileIshObject, IOBase):
# If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
return bytes
encoding = getattr(fileIshObject, 'encoding', None)
import codecs
if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
# On StreamReaderWriter, the 'encoding' attribute has special meaning;
# it is unambiguously unicode.
if encoding:
return unicode
else:
return bytes
if not _PY3:
# Special case: if we have an encoding file, we can *give* it unicode,
# but we can't expect to *get* unicode.
if isinstance(fileIshObject, file):
if encoding is not None:
return basestring
else:
return bytes
from cStringIO import InputType, OutputType
from StringIO import StringIO
if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
return bytes
return default
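# Illustrative sketch, not part of the original module: probing file objects
# with ioType(). io.StringIO is text mode and io.BytesIO is bytes mode on
# both Python 2 and Python 3.
def _example_ioType():
    import io
    assert ioType(io.StringIO()) is unicode
    assert ioType(io.BytesIO()) is bytes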
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{unicode}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{unicode} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{unicode} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
# (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
If an offset is given, the copy starts at that offset. If a size is
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arabic numerals.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
# Python 3+
FileType = IOBase
if _PY3:
import urllib.parse as urllib_parse
from html import escape
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from http import cookiejar as cookielib
else:
import urlparse as urllib_parse
from cgi import escape
from urllib import quote as urlquote
from urllib import unquote as urlunquote
import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
def items(d):
return list(d.items())
xrange = range
izip = zip
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def items(d):
return d.items()
xrange = xrange
from itertools import izip
izip # shh pyflakes
iteritems.__doc__ = """
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
itervalues.__doc__ = """
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
items.__doc__ = """
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
"""
Return a list of the keys of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
if _PY3:
return list(d.keys())
else:
return d.keys()
def bytesEnviron():
"""
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
"""
if not _PY3:
# On py2, nothing to do.
return dict(os.environ)
target = dict()
for x, y in os.environ.items():
target[os.environ.encodekey(x)] = os.environ.encodevalue(y)
return target
def _constructMethod(cls, name, self):
"""
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{types.ClassType} or L{type}.
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{types.MethodType}
"""
func = cls.__dict__[name]
if _PY3:
return _MethodType(func, self)
return _MethodType(func, self, cls)
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.compat",
"OrderedDict")
if _PY3:
from base64 import encodebytes as _b64encodebytes
from base64 import decodebytes as _b64decodebytes
else:
from base64 import encodestring as _b64encodebytes
from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
"""
Like L{chr} but always works on ASCII, returning L{bytes}.
@param i: The ASCII code point to return.
@type i: L{int}
@rtype: L{bytes}
"""
if _PY3:
return bytes([i])
else:
return chr(i)
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"OrderedDict",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
"items",
"iteritems",
"itervalues",
"xrange",
"urllib_parse",
"bytesEnviron",
"escape",
"urlquote",
"urlunquote",
"cookielib",
"_keys",
"_b64encodebytes",
"_b64decodebytes",
"_bytesChr",
]
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/python/compat.py | Python | gpl-3.0 | 19,303 |
"""
API views for badges
"""
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from opaque_keys import InvalidKeyError
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
from rest_framework import generics
from rest_framework.exceptions import APIException
from badges.models import BadgeAssertion
from openedx.core.djangoapps.user_api.permissions import is_field_shared_factory
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
from .serializers import BadgeAssertionSerializer
class InvalidCourseKeyError(APIException):
"""
    Raised when the given course key isn't valid.
"""
status_code = 400
default_detail = "The course key provided was invalid."
class UserBadgeAssertions(generics.ListAPIView):
"""
** Use cases **
Request a list of assertions for a user, optionally constrained to a course.
** Example Requests **
GET /api/badges/v1/assertions/user/{username}/
** Response Values **
Body comprised of a list of objects with the following fields:
* badge_class: The badge class the assertion was awarded for. Represented as an object
with the following fields:
* slug: The identifier for the badge class
* issuing_component: The software component responsible for issuing this badge.
* display_name: The display name of the badge.
* course_id: The course key of the course this badge is scoped to, or null if it isn't scoped to a course.
* description: A description of the award and its significance.
* criteria: A description of what is needed to obtain this award.
* image_url: A URL to the icon image used to represent this award.
* image_url: The baked assertion image derived from the badge_class icon-- contains metadata about the award
in its headers.
* assertion_url: The URL to the OpenBadges BadgeAssertion object, for verification by compatible tools
and software.
** Params **
* slug (optional): The identifier for a particular badge class to filter by.
* issuing_component (optional): The issuing component for a particular badge class to filter by
(requires slug to have been specified, or this will be ignored.) If slug is provided and this is not,
assumes the issuing_component should be empty.
* course_id (optional): Returns assertions that were awarded as part of a particular course. If slug is
provided, and this field is not specified, assumes that the target badge has an empty course_id field.
'*' may be used to get all badges with the specified slug, issuing_component combination across all courses.
** Returns **
* 200 on success, with a list of Badge Assertion objects.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the specified user does not exist
{
"count": 7,
"previous": null,
"num_pages": 1,
"results": [
{
"badge_class": {
"slug": "special_award",
"issuing_component": "openedx__course",
"display_name": "Very Special Award",
"course_id": "course-v1:edX+DemoX+Demo_Course",
"description": "Awarded for people who did something incredibly special",
"criteria": "Do something incredibly special.",
"image": "http://example.com/media/badge_classes/badges/special_xdpqpBv_9FYOZwN.png"
},
"image_url": "http://badges.example.com/media/issued/cd75b69fc1c979fcc1697c8403da2bdf.png",
"assertion_url": "http://badges.example.com/public/assertions/07020647-e772-44dd-98b7-d13d34335ca6"
},
...
]
}
"""
serializer_class = BadgeAssertionSerializer
authentication_classes = (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser
)
permission_classes = (is_field_shared_factory("accomplishments_shared"),)
def filter_queryset(self, queryset):
"""
Return most recent to least recent badge.
"""
return queryset.order_by('-created')
def get_queryset(self):
"""
Get all badges for the username specified.
"""
queryset = BadgeAssertion.objects.filter(user__username=self.kwargs['username'])
provided_course_id = self.request.query_params.get('course_id')
if provided_course_id == '*':
# We might want to get all the matching course scoped badges to see how many courses
# a user managed to get a specific award on.
course_id = None
elif provided_course_id:
try:
course_id = CourseKey.from_string(provided_course_id)
except InvalidKeyError:
raise InvalidCourseKeyError
elif 'slug' not in self.request.query_params:
# Need to get all badges for the user.
course_id = None
else:
# Django won't let us use 'None' for querying a ForeignKey field. We have to use this special
# 'Empty' value to indicate we're looking only for badges without a course key set.
course_id = CourseKeyField.Empty
if course_id is not None:
queryset = queryset.filter(badge_class__course_id=course_id)
if self.request.query_params.get('slug'):
queryset = queryset.filter(
badge_class__slug=self.request.query_params['slug'],
badge_class__issuing_component=self.request.query_params.get('issuing_component', '')
)
return queryset
| teltek/edx-platform | lms/djangoapps/badges/api/views.py | Python | agpl-3.0 | 6,051 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from os import chmod
from spack import *
class Tbl2asn(Package):
"""Tbl2asn is a command-line program that automates the creation of
sequence records for submission to GenBank."""
homepage = "https://www.ncbi.nlm.nih.gov/genbank/tbl2asn2/"
version('2020-03-01', sha256='7cc1119d3cfcbbffdbd4ecf33cef8bbdd44fc5625c72976bee08b1157625377e')
def url_for_version(self, ver):
return "https://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn/linux.tbl2asn.gz"
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('../linux.tbl2asn', prefix.bin.tbl2asn)
chmod(prefix.bin.tbl2asn, 0o775)
| LLNL/spack | var/spack/repos/builtin/packages/tbl2asn/package.py | Python | lgpl-2.1 | 866 |
"""
A library of useful helper classes to the saxlib classes, for the
convenience of application and driver writers.
$Id: saxutils.py,v 1.35 2004/03/20 07:46:04 fdrake Exp $
"""
import os, urlparse, urllib2, types
import handler
import xmlreader
import sys, _exceptions, saxlib
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:  # 1.5 compatibility: UnicodeType not defined
_StringTypes = [types.StringType]
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("&", "&")
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
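# Illustrative sketch, not part of the original module: how the three
# escaping helpers above compose when writing markup by hand.
def _example_escaping():
    assert escape('a < b & c') == 'a &lt; b &amp; c'
    assert unescape(escape('a < b')) == 'a < b'
    assert quoteattr('say "hi"') == '\'say "hi"\''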
# --- DefaultHandler
class DefaultHandler(handler.EntityResolver, handler.DTDHandler,
handler.ContentHandler, handler.ErrorHandler):
"""Default base class for SAX2 event handlers. Implements empty
methods for all callback methods, which can be overridden by
application implementors. Replaces the deprecated SAX1 HandlerBase
class."""
# --- Location
class Location:
"""Represents a location in an XML entity. Initialized by being passed
a locator, from which it reads off the current location, which is then
stored internally."""
def __init__(self, locator):
self.__col = locator.getColumnNumber()
self.__line = locator.getLineNumber()
self.__pubid = locator.getPublicId()
self.__sysid = locator.getSystemId()
def getColumnNumber(self):
return self.__col
def getLineNumber(self):
return self.__line
def getPublicId(self):
return self.__pubid
def getSystemId(self):
return self.__sysid
def __str__(self):
if self.__line is None:
line = "?"
else:
line = self.__line
if self.__col is None:
col = "?"
else:
col = self.__col
return "%s:%s:%s" % (
self.__sysid or self.__pubid or "<unknown>",
line, col)
# --- ErrorPrinter
class ErrorPrinter:
"A simple class that just prints error messages to standard out."
def __init__(self, level=0, outfile=sys.stderr):
self._level = level
self._outfile = outfile
def warning(self, exception):
if self._level <= 0:
self._outfile.write("WARNING in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def error(self, exception):
if self._level <= 1:
self._outfile.write("ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def fatalError(self, exception):
if self._level <= 2:
self._outfile.write("FATAL ERROR in %s: %s\n" %
(self.__getpos(exception),
exception.getMessage()))
def __getpos(self, exception):
if isinstance(exception, _exceptions.SAXParseException):
return "%s:%s:%s" % (exception.getSystemId(),
exception.getLineNumber(),
exception.getColumnNumber())
else:
return "<unknown>"
# --- ErrorRaiser
class ErrorRaiser:
"A simple class that just raises the exceptions it is passed."
def __init__(self, level = 0):
self._level = level
def error(self, exception):
if self._level <= 1:
raise exception
def fatalError(self, exception):
if self._level <= 2:
raise exception
def warning(self, exception):
if self._level <= 0:
raise exception
# --- AttributesImpl now lives in xmlreader
from xmlreader import AttributesImpl
# --- XMLGenerator is the SAX2 ContentHandler for writing back XML
import codecs
def _outputwrapper(stream,encoding):
writerclass = codecs.lookup(encoding)[3]
return writerclass(stream)
if hasattr(codecs, "register_error"):
def writetext(stream, text, entities={}):
stream.errors = "xmlcharrefreplace"
stream.write(escape(text, entities))
stream.errors = "strict"
else:
def writetext(stream, text, entities={}):
text = escape(text, entities)
try:
stream.write(text)
except UnicodeError:
for c in text:
try:
stream.write(c)
except UnicodeError:
stream.write(u"&#%d;" % ord(c))
def writeattr(stream, text):
countdouble = text.count('"')
if countdouble:
countsingle = text.count("'")
if countdouble <= countsingle:
entities = {'"': """}
quote = '"'
else:
entities = {"'": "'"}
quote = "'"
else:
entities = {}
quote = '"'
stream.write(quote)
writetext(stream, text, entities)
stream.write(quote)
class XMLGenerator(handler.ContentHandler):
GENERATED_PREFIX = "pyxml.sax.saxutils.prefix%s"
def __init__(self, out=None, encoding="iso-8859-1"):
if out is None:
import sys
out = sys.stdout
handler.ContentHandler.__init__(self)
self._out = _outputwrapper(out,encoding)
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._generated_prefix_ctr = 0
return
# ContentHandler methods
def startDocument(self):
self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._out.write('<' + name)
for (name, value) in attrs.items():
self._out.write(' %s=' % name)
writeattr(self._out, value)
self._out.write('>')
def endElement(self, name):
self._out.write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
if name[0] is None:
name = name[1]
elif self._current_context[name[0]] is None:
# default namespace
name = name[1]
else:
name = self._current_context[name[0]] + ":" + name[1]
self._out.write('<' + name)
for k,v in self._undeclared_ns_maps:
if k is None:
self._out.write(' xmlns="%s"' % (v or ''))
else:
self._out.write(' xmlns:%s="%s"' % (k,v))
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
if name[0] is None:
name = name[1]
elif self._current_context[name[0]] is None:
# default namespace
#If an attribute has a nsuri but not a prefix, we must
#create a prefix and add a nsdecl
prefix = self.GENERATED_PREFIX % self._generated_prefix_ctr
self._generated_prefix_ctr = self._generated_prefix_ctr + 1
                # Record the namespace URI before `name` is rebound to a
                # plain string, otherwise name[0] is just its first character.
                nsuri = name[0]
                name = prefix + ':' + name[1]
                self._out.write(' xmlns:%s=%s' % (prefix, quoteattr(nsuri)))
                self._current_context[nsuri] = prefix
else:
name = self._current_context[name[0]] + ":" + name[1]
self._out.write(' %s=' % name)
writeattr(self._out, value)
self._out.write('>')
def endElementNS(self, name, qname):
# XXX: if qname is not None, we better use it.
# Python 2.0b2 requires us to use the recorded prefix for
# name[0], though
if name[0] is None:
qname = name[1]
elif self._current_context[name[0]] is None:
qname = name[1]
else:
qname = self._current_context[name[0]] + ":" + name[1]
self._out.write('</%s>' % qname)
def characters(self, content):
writetext(self._out, content)
def ignorableWhitespace(self, content):
self._out.write(content)
def processingInstruction(self, target, data):
self._out.write('<?%s %s?>' % (target, data))
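# Illustrative sketch, not part of the original module: driving XMLGenerator
# by hand to emit a tiny document into a StringIO. A plain dict stands in for
# an AttributesImpl here, since startElement only needs attrs.items().
def _example_generate():
    from StringIO import StringIO
    out = StringIO()
    gen = XMLGenerator(out)
    gen.startDocument()
    gen.startElement('greeting', {'lang': 'en'})
    gen.characters('hello')
    gen.endElement('greeting')
    return out.getvalue()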
class LexicalXMLGenerator(XMLGenerator, saxlib.LexicalHandler):
"""A XMLGenerator that also supports the LexicalHandler interface"""
def __init__(self, out=None, encoding="iso-8859-1"):
XMLGenerator.__init__(self, out, encoding)
self._in_cdata = 0
def characters(self, content):
if self._in_cdata:
self._out.write(content.replace(']]>', ']]>]]><![CDATA['))
else:
self._out.write(escape(content))
# LexicalHandler methods
# (we only support the most important ones and inherit the rest)
def startDTD(self, name, public_id, system_id):
self._out.write('<!DOCTYPE %s' % name)
if public_id:
self._out.write(' PUBLIC %s %s' % (
quoteattr(public_id or ""), quoteattr(system_id or "")
))
elif system_id:
self._out.write(' SYSTEM %s' % quoteattr(system_id or ""))
def endDTD(self):
self._out.write('>')
def comment(self, content):
self._out.write('<!--')
self._out.write(content)
self._out.write('-->')
def startCDATA(self):
self._in_cdata = 1
self._out.write('<![CDATA[')
def endCDATA(self):
self._in_cdata = 0
self._out.write(']]>')
# --- ContentGenerator is the SAX1 DocumentHandler for writing back XML
class ContentGenerator(XMLGenerator):
def characters(self, str, start, end):
# In SAX1, characters receives start and end; in SAX2, it receives
# a string. For plain strings, we may want to use a buffer object.
return XMLGenerator.characters(self, str[start:start+end])
# --- XMLFilterImpl
class XMLFilterBase(saxlib.XMLFilter):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# FIXME: remove this backward compatibility hack when not needed anymore
XMLFilterImpl = XMLFilterBase
# --- BaseIncrementalParser
class BaseIncrementalParser(xmlreader.IncrementalParser):
"""This class implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def parse(self, source):
source = prepare_input_source(source)
self.prepareParser(source)
self._cont_handler.startDocument()
# FIXME: what about char-stream?
inf = source.getByteStream()
buffer = inf.read(16384)
while buffer != "":
self.feed(buffer)
buffer = inf.read(16384)
self.close()
self.reset()
self._cont_handler.endDocument()
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
# --- Utility functions
def prepare_input_source(source, base = ""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if type(source) in _StringTypes:
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
if os.path.isfile(sysid):
basehead = os.path.split(os.path.normpath(base))[0]
source.setSystemId(os.path.join(basehead, sysid))
f = open(sysid, "rb")
else:
source.setSystemId(urlparse.urljoin(base, sysid))
f = urllib2.urlopen(source.getSystemId())
source.setByteStream(f)
return source
# ===========================================================================
#
# DEPRECATED SAX 1.0 CLASSES
#
# ===========================================================================
# --- AttributeMap
class AttributeMap:
"""An implementation of AttributeList that takes an (attr,val) hash
and uses it to implement the AttributeList interface."""
def __init__(self, map):
self.map=map
def getLength(self):
return len(self.map.keys())
def getName(self, i):
try:
return self.map.keys()[i]
except IndexError,e:
return None
def getType(self, i):
return "CDATA"
def getValue(self, i):
try:
if type(i)==types.IntType:
return self.map[self.getName(i)]
else:
return self.map[i]
except KeyError,e:
return None
def __len__(self):
return len(self.map)
def __getitem__(self, key):
if type(key)==types.IntType:
return self.map.keys()[key]
else:
return self.map[key]
def items(self):
return self.map.items()
def keys(self):
return self.map.keys()
def has_key(self,key):
return self.map.has_key(key)
def get(self, key, alternative=None):
return self.map.get(key, alternative)
def copy(self):
return AttributeMap(self.map.copy())
def values(self):
return self.map.values()
# --- Event broadcasting object
class EventBroadcaster:
"""Takes a list of objects and forwards any method calls received
to all objects in the list. The attribute list holds the list and
can freely be modified by clients."""
class Event:
"Helper objects that represent event methods."
def __init__(self,list,name):
self.list=list
self.name=name
def __call__(self,*rest):
for obj in self.list:
apply(getattr(obj,self.name), rest)
def __init__(self,list):
self.list=list
def __getattr__(self,name):
return self.Event(self.list,name)
def __repr__(self):
return "<EventBroadcaster instance at %d>" % id(self)
# --- ESIS document handler
import saxlib
class ESISDocHandler(saxlib.HandlerBase):
"A SAX document handler that produces naive ESIS output."
def __init__(self,writer=sys.stdout):
self.writer=writer
def processingInstruction (self,target, remainder):
"""Receive an event signalling that a processing instruction
has been found."""
self.writer.write("?"+target+" "+remainder+"\n")
def startElement(self,name,amap):
"Receive an event signalling the start of an element."
self.writer.write("("+name+"\n")
for a_name in amap.keys():
self.writer.write("A"+a_name+" "+amap[a_name]+"\n")
def endElement(self,name):
"Receive an event signalling the end of an element."
self.writer.write(")"+name+"\n")
def characters(self,data,start_ix,length):
"Receive an event signalling that character data has been found."
self.writer.write("-"+data[start_ix:start_ix+length]+"\n")
# --- XML canonizer
class Canonizer(saxlib.HandlerBase):
"A SAX document handler that produces canonized XML output."
def __init__(self,writer=sys.stdout):
self.elem_level=0
self.writer=writer
def processingInstruction (self,target, remainder):
if not target=="xml":
self.writer.write("<?"+target+" "+remainder+"?>")
def startElement(self,name,amap):
self.writer.write("<"+name)
a_names=amap.keys()
a_names.sort()
for a_name in a_names:
self.writer.write(" "+a_name+"=\"")
self.write_data(amap[a_name])
self.writer.write("\"")
self.writer.write(">")
self.elem_level=self.elem_level+1
def endElement(self,name):
self.writer.write("</"+name+">")
self.elem_level=self.elem_level-1
def ignorableWhitespace(self,data,start_ix,length):
self.characters(data,start_ix,length)
def characters(self,data,start_ix,length):
if self.elem_level>0:
self.write_data(data[start_ix:start_ix+length])
def write_data(self,data):
"Writes datachars to writer."
data=data.replace("&","&")
data=data.replace("<","<")
data=data.replace("\"",""")
data=data.replace(">",">")
data=data.replace(chr(9),"	")
        data=data.replace(chr(10),"&#10;")
        data=data.replace(chr(13),"&#13;")
self.writer.write(data)
# --- mllib
class mllib:
"""A re-implementation of the htmllib, sgmllib and xmllib interfaces as a
SAX DocumentHandler."""
# Unsupported:
# - setnomoretags
# - setliteral
# - translate_references
# - handle_xml
# - handle_doctype
# - handle_charref
# - handle_entityref
# - handle_comment
# - handle_cdata
# - tag_attributes
def __init__(self):
self.reset()
def reset(self):
import saxexts # only used here
self.parser=saxexts.XMLParserFactory.make_parser()
self.handler=mllib.Handler(self.parser,self)
self.handler.reset()
def feed(self,data):
self.parser.feed(data)
def close(self):
self.parser.close()
def get_stack(self):
return self.handler.get_stack()
# --- Handler methods (to be overridden)
def handle_starttag(self,name,method,atts):
method(atts)
def handle_endtag(self,name,method):
method()
def handle_data(self,data):
pass
def handle_proc(self,target,data):
pass
def unknown_starttag(self,name,atts):
pass
def unknown_endtag(self,name):
pass
def syntax_error(self,message):
pass
# --- The internal handler class
class Handler(saxlib.DocumentHandler,saxlib.ErrorHandler):
"""An internal class to handle SAX events and translate them to mllib
events."""
def __init__(self,driver,handler):
self.driver=driver
self.driver.setDocumentHandler(self)
self.driver.setErrorHandler(self)
self.handler=handler
self.reset()
def get_stack(self):
return self.stack
def reset(self):
self.stack=[]
# --- DocumentHandler methods
def characters(self, ch, start, length):
self.handler.handle_data(ch[start:start+length])
def endElement(self, name):
if hasattr(self.handler,"end_"+name):
self.handler.handle_endtag(name,
getattr(self.handler,"end_"+name))
else:
self.handler.unknown_endtag(name)
del self.stack[-1]
def ignorableWhitespace(self, ch, start, length):
self.handler.handle_data(ch[start:start+length])
def processingInstruction(self, target, data):
self.handler.handle_proc(target,data)
def startElement(self, name, atts):
self.stack.append(name)
if hasattr(self.handler,"start_"+name):
self.handler.handle_starttag(name,
getattr(self.handler,
"start_"+name),
atts)
else:
self.handler.unknown_starttag(name,atts)
# --- ErrorHandler methods
def error(self, exception):
self.handler.syntax_error(str(exception))
def fatalError(self, exception):
raise RuntimeError(str(exception))
| selfcommit/gaedav | pyxml/sax/saxutils.py | Python | lgpl-2.1 | 24,606 |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
try_get,
unified_timestamp,
url_or_none,
)
class PinterestBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:[^/]+\.)?pinterest\.(?:com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)'
def _call_api(self, resource, video_id, options):
return self._download_json(
'https://www.pinterest.com/resource/%sResource/get/' % resource,
video_id, 'Download %s JSON metadata' % resource, query={
'data': json.dumps({'options': options})
})['resource_response']
def _extract_video(self, data, extract_formats=True):
video_id = data['id']
title = (data.get('title') or data.get('grid_title') or video_id).strip()
urls = []
formats = []
duration = None
if extract_formats:
for format_id, format_dict in data['videos']['video_list'].items():
if not isinstance(format_dict, dict):
continue
format_url = url_or_none(format_dict.get('url'))
if not format_url or format_url in urls:
continue
urls.append(format_url)
duration = float_or_none(format_dict.get('duration'), scale=1000)
ext = determine_ext(format_url)
if 'hls' in format_id.lower() or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False))
else:
formats.append({
'url': format_url,
'format_id': format_id,
'width': int_or_none(format_dict.get('width')),
'height': int_or_none(format_dict.get('height')),
'duration': duration,
})
self._sort_formats(
formats, field_preference=('height', 'width', 'tbr', 'format_id'))
description = data.get('description') or data.get('description_html') or data.get('seo_description')
timestamp = unified_timestamp(data.get('created_at'))
def _u(field):
return try_get(data, lambda x: x['closeup_attribution'][field], compat_str)
uploader = _u('full_name')
uploader_id = _u('id')
repost_count = int_or_none(data.get('repin_count'))
comment_count = int_or_none(data.get('comment_count'))
categories = try_get(data, lambda x: x['pin_join']['visual_annotation'], list)
tags = data.get('hashtags')
thumbnails = []
images = data.get('images')
if isinstance(images, dict):
for thumbnail_id, thumbnail in images.items():
if not isinstance(thumbnail, dict):
continue
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'thumbnails': thumbnails,
'uploader': uploader,
'uploader_id': uploader_id,
'repost_count': repost_count,
'comment_count': comment_count,
'categories': categories,
'tags': tags,
'formats': formats,
'extractor_key': PinterestIE.ie_key(),
}
class PinterestIE(PinterestBaseIE):
_VALID_URL = r'%s/pin/(?P<id>\d+)' % PinterestBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'https://www.pinterest.com/pin/664281013778109217/',
'md5': '6550c2af85d6d9f3fe3b88954d1577fc',
'info_dict': {
'id': '664281013778109217',
'ext': 'mp4',
'title': 'Origami',
'description': 'md5:b9d90ddf7848e897882de9e73344f7dd',
'duration': 57.7,
'timestamp': 1593073622,
'upload_date': '20200625',
'uploader': 'Love origami -I am Dafei',
'uploader_id': '586523688879454212',
'repost_count': 50,
'comment_count': 0,
'categories': list,
'tags': list,
},
}, {
'url': 'https://co.pinterest.com/pin/824721750502199491/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
data = self._call_api(
'Pin', video_id, {
'field_set_key': 'unauth_react_main_pin',
'id': video_id,
})['data']
return self._extract_video(data)
class PinterestCollectionIE(PinterestBaseIE):
_VALID_URL = r'%s/(?P<username>[^/]+)/(?P<id>[^/?#&]+)' % PinterestBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'https://www.pinterest.ca/mashal0407/cool-diys/',
'info_dict': {
'id': '585890301462791043',
'title': 'cool diys',
},
'playlist_count': 8,
}, {
'url': 'https://www.pinterest.ca/fudohub/videos/',
'info_dict': {
'id': '682858430939307450',
'title': 'VIDEOS',
},
'playlist_mincount': 365,
'skip': 'Test with extract_formats=False',
}]
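    # The collection URL pattern is broad enough to also match pin URLs,
    # so only claim a URL here when the more specific pin extractor does
    # not handle it first.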
@classmethod
def suitable(cls, url):
return False if PinterestIE.suitable(url) else super(
PinterestCollectionIE, cls).suitable(url)
def _real_extract(self, url):
username, slug = re.match(self._VALID_URL, url).groups()
board = self._call_api(
'Board', slug, {
'slug': slug,
'username': username
})['data']
board_id = board['id']
options = {
'board_id': board_id,
'page_size': 250,
}
bookmark = None
entries = []
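        # BoardFeed is cursor-paginated: each response carries a bookmark
        # token that is echoed back to fetch the next page until no
        # bookmark is returned.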
while True:
if bookmark:
options['bookmarks'] = [bookmark]
board_feed = self._call_api('BoardFeed', board_id, options)
for item in (board_feed.get('data') or []):
if not isinstance(item, dict) or item.get('type') != 'pin':
continue
video_id = item.get('id')
if video_id:
# Some pins may not be available anonymously via pin URL
# video = self._extract_video(item, extract_formats=False)
# video.update({
# '_type': 'url_transparent',
# 'url': 'https://www.pinterest.com/pin/%s/' % video_id,
# })
# entries.append(video)
entries.append(self._extract_video(item))
bookmark = board_feed.get('bookmark')
if not bookmark:
break
return self.playlist_result(
entries, playlist_id=board_id, playlist_title=board.get('name'))
| rg3/youtube-dl | youtube_dl/extractor/pinterest.py | Python | unlicense | 7,598 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub implementation of the modules service."""
from google.appengine.api import apiproxy_stub
from google.appengine.api import request_info
from google.appengine.api.modules import modules_service_pb
from google.appengine.runtime import apiproxy_errors
class ModulesServiceStub(apiproxy_stub.APIProxyStub):
_ACCEPTS_REQUEST_ID = True
THREADSAFE = True
def __init__(self, request_data):
super(ModulesServiceStub, self).__init__('modules',
request_data=request_data)
def _GetModuleFromRequest(self, request, request_id):
dispatcher = self.request_data.get_dispatcher()
if request.has_module():
module = request.module()
else:
module = self.request_data.get_module(request_id)
return module, dispatcher
def _GetModuleAndVersionFromRequest(self, request, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
if request.has_version():
version = request.version()
else:
version = self.request_data.get_version(request_id)
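    # Fall back to the module's default version when the requested or
    # inferred version is unknown to the dispatcher.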
if version not in dispatcher.get_versions(module):
version = dispatcher.get_default_version(module)
return module, version, dispatcher
def _Dynamic_GetModules(self, request, response, request_id):
dispatcher = self.request_data.get_dispatcher()
for module in dispatcher.get_module_names():
response.add_module(module)
def _Dynamic_GetVersions(self, request, response, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
try:
for version in dispatcher.get_versions(module):
response.add_version(version)
except request_info.ModuleDoesNotExistError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
def _Dynamic_GetDefaultVersion(self, request, response, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
try:
response.set_version(dispatcher.get_default_version(module))
except request_info.ModuleDoesNotExistError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
def _Dynamic_GetNumInstances(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
response.set_instances(dispatcher.get_num_instances(module, version))
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
def _Dynamic_SetNumInstances(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
dispatcher.set_num_instances(module, version, request.instances())
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
def _Dynamic_StartModule(self, request, response, request_id):
module = request.module()
version = request.version()
dispatcher = self.request_data.get_dispatcher()
try:
dispatcher.start_module(module, version)
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
except request_info.ModuleAlreadyStartedError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.UNEXPECTED_STATE)
def _Dynamic_StopModule(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
dispatcher.stop_module(module, version)
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
except request_info.ModuleAlreadyStoppedError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.UNEXPECTED_STATE)
def _Dynamic_GetHostname(self, request, response, request_id):
if request.has_instance():
instance = request.instance()
else:
instance = None
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
response.set_hostname(dispatcher.get_hostname(module, version, instance))
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
except request_info.InvalidInstanceIdError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_INSTANCES)
| yencarnacion/jaikuengine | .google_appengine/google/appengine/api/modules/modules_stub.py | Python | apache-2.0 | 5,933 |
"""
Support for TP-Link routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.tplink/
"""
import base64
from datetime import datetime
import hashlib
import logging
import re
from aiohttp.hdrs import (
ACCEPT, COOKIE, PRAGMA, REFERER, CONNECTION, KEEP_ALIVE, USER_AGENT,
CONTENT_TYPE, CACHE_CONTROL, ACCEPT_ENCODING, ACCEPT_LANGUAGE)
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, HTTP_HEADER_X_REQUESTED_WITH)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['tplink==0.2.1']
_LOGGER = logging.getLogger(__name__)
HTTP_HEADER_NO_CACHE = 'no-cache'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string
})
def get_scanner(hass, config):
"""
Validate the configuration and return a TP-Link scanner.
The default way of integrating devices is to use a pypi
package, The TplinkDeviceScanner has been refactored
to depend on a pypi package, the other implementations
should be gradually migrated in the pypi package
"""
for cls in [
TplinkDeviceScanner, Tplink5DeviceScanner, Tplink4DeviceScanner,
Tplink3DeviceScanner, Tplink2DeviceScanner, Tplink1DeviceScanner
]:
scanner = cls(config[DOMAIN])
if scanner.success_init:
return scanner
return None
class TplinkDeviceScanner(DeviceScanner):
"""Queries the router for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
from tplink.tplink import TpLinkClient
host = config[CONF_HOST]
password = config[CONF_PASSWORD]
username = config[CONF_USERNAME]
self.success_init = False
try:
self.tplink_client = TpLinkClient(
password, host=host, username=username)
self.last_results = {}
self.success_init = self._update_info()
except requests.exceptions.ConnectionError:
_LOGGER.debug("ConnectionError in TplinkDeviceScanner")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results.keys()
def get_device_name(self, device):
"""Get the name of the device."""
return self.last_results.get(device)
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
result = self.tplink_client.get_connected_devices()
if result:
self.last_results = result
return True
return False
class Tplink1DeviceScanner(DeviceScanner):
"""This class queries a wireless router running TP-Link firmware."""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_macs = re.compile('[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}-' +
'[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}')
self.host = host
self.username = username
self.password = password
self.last_results = {}
self.success_init = False
try:
self.success_init = self._update_info()
except requests.exceptions.ConnectionError:
_LOGGER.debug("ConnectionError in Tplink1DeviceScanner")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Get firmware doesn't save the name of the wireless device."""
return None
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/userRpm/WlanStationRpm.htm'.format(self.host)
referer = 'http://{}'.format(self.host)
page = requests.get(
url, auth=(self.username, self.password),
headers={REFERER: referer}, timeout=4)
result = self.parse_macs.findall(page.text)
if result:
self.last_results = [mac.replace("-", ":") for mac in result]
return True
return False
class Tplink2DeviceScanner(Tplink1DeviceScanner):
"""This class queries a router with newer version of TP-Link firmware."""
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results.keys()
def get_device_name(self, device):
"""Get firmware doesn't save the name of the wireless device."""
return self.last_results.get(device)
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/data/map_access_wireless_client_grid.json' \
.format(self.host)
referer = 'http://{}'.format(self.host)
# Router uses Authorization cookie instead of header
# Let's create the cookie
username_password = '{}:{}'.format(self.username, self.password)
b64_encoded_username_password = base64.b64encode(
username_password.encode('ascii')
).decode('ascii')
cookie = 'Authorization=Basic {}' \
.format(b64_encoded_username_password)
response = requests.post(
url, headers={REFERER: referer, COOKIE: cookie},
timeout=4)
try:
result = response.json().get('data')
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct.")
return False
if result:
self.last_results = {
device['mac_addr'].replace('-', ':'): device['name']
for device in result
}
return True
return False
class Tplink3DeviceScanner(Tplink1DeviceScanner):
"""This class queries the Archer C9 router with version 150811 or high."""
def __init__(self, config):
"""Initialize the scanner."""
self.stok = ''
self.sysauth = ''
super(Tplink3DeviceScanner, self).__init__(config)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
self._log_out()
return self.last_results.keys()
def get_device_name(self, device):
"""Get the firmware doesn't save the name of the wireless device.
We are forced to use the MAC address as name here.
"""
return self.last_results.get(device)
def _get_auth_tokens(self):
"""Retrieve auth tokens from the router."""
_LOGGER.info("Retrieving auth tokens...")
url = 'http://{}/cgi-bin/luci/;stok=/login?form=login' \
.format(self.host)
referer = 'http://{}/webpages/login.html'.format(self.host)
# If possible implement RSA encryption of password here.
response = requests.post(
url, params={'operation': 'login', 'username': self.username,
'password': self.password},
headers={REFERER: referer}, timeout=4)
try:
self.stok = response.json().get('data').get('stok')
_LOGGER.info(self.stok)
regex_result = re.search(
'sysauth=(.*);', response.headers['set-cookie'])
self.sysauth = regex_result.group(1)
_LOGGER.info(self.sysauth)
return True
except (ValueError, KeyError):
_LOGGER.error("Couldn't fetch auth tokens! Response was: %s",
response.text)
return False
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
if (self.stok == '') or (self.sysauth == ''):
self._get_auth_tokens()
_LOGGER.info("Loading wireless clients...")
url = ('http://{}/cgi-bin/luci/;stok={}/admin/wireless?'
'form=statistics').format(self.host, self.stok)
referer = 'http://{}/webpages/index.html'.format(self.host)
response = requests.post(
url, params={'operation': 'load'}, headers={REFERER: referer},
cookies={'sysauth': self.sysauth}, timeout=5)
try:
json_response = response.json()
if json_response.get('success'):
result = response.json().get('data')
else:
if json_response.get('errorcode') == 'timeout':
_LOGGER.info("Token timed out. Relogging on next scan")
self.stok = ''
self.sysauth = ''
return False
_LOGGER.error(
"An unknown error happened while fetching data")
return False
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct")
return False
if result:
self.last_results = {
device['mac'].replace('-', ':'): device['mac']
for device in result
}
return True
return False
def _log_out(self):
_LOGGER.info("Logging out of router admin interface...")
url = ('http://{}/cgi-bin/luci/;stok={}/admin/system?'
'form=logout').format(self.host, self.stok)
referer = 'http://{}/webpages/index.html'.format(self.host)
requests.post(
url, params={'operation': 'write'}, headers={REFERER: referer},
cookies={'sysauth': self.sysauth})
self.stok = ''
self.sysauth = ''
class Tplink4DeviceScanner(Tplink1DeviceScanner):
"""This class queries an Archer C7 router with TP-Link firmware 150427."""
def __init__(self, config):
"""Initialize the scanner."""
self.credentials = ''
self.token = ''
super(Tplink4DeviceScanner, self).__init__(config)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Get the name of the wireless device."""
return None
def _get_auth_tokens(self):
"""Retrieve auth tokens from the router."""
_LOGGER.info("Retrieving auth tokens...")
url = 'http://{}/userRpm/LoginRpm.htm?Save=Save'.format(self.host)
# Generate md5 hash of password. The C7 appears to use the first 15
# characters of the password only, so we truncate to remove additional
# characters from being hashed.
password = hashlib.md5(self.password.encode('utf')[:15]).hexdigest()
credentials = '{}:{}'.format(self.username, password).encode('utf')
# Encode the credentials to be sent as a cookie.
self.credentials = base64.b64encode(credentials).decode('utf')
# Create the authorization cookie.
cookie = 'Authorization=Basic {}'.format(self.credentials)
response = requests.get(url, headers={COOKIE: cookie})
try:
result = re.search(r'window.parent.location.href = '
r'"https?:\/\/.*\/(.*)\/userRpm\/Index.htm";',
response.text)
if not result:
return False
self.token = result.group(1)
return True
except ValueError:
_LOGGER.error("Couldn't fetch auth tokens")
return False
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
if (self.credentials == '') or (self.token == ''):
self._get_auth_tokens()
_LOGGER.info("Loading wireless clients...")
mac_results = []
# Check both the 2.4GHz and 5GHz client list URLs
for clients_url in ('WlanStationRpm.htm', 'WlanStationRpm_5g.htm'):
url = 'http://{}/{}/userRpm/{}' \
.format(self.host, self.token, clients_url)
referer = 'http://{}'.format(self.host)
cookie = 'Authorization=Basic {}'.format(self.credentials)
page = requests.get(url, headers={
COOKIE: cookie,
REFERER: referer,
})
mac_results.extend(self.parse_macs.findall(page.text))
if not mac_results:
return False
self.last_results = [mac.replace("-", ":") for mac in mac_results]
return True
class Tplink5DeviceScanner(Tplink1DeviceScanner):
"""This class queries a TP-Link EAP-225 AP with newer TP-Link FW."""
def scan_devices(self):
"""Scan for new devices and return a list with found MAC IDs."""
self._update_info()
return self.last_results.keys()
def get_device_name(self, device):
"""Get firmware doesn't save the name of the wireless device."""
return None
def _update_info(self):
"""Ensure the information from the TP-Link AP is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
base_url = 'http://{}'.format(self.host)
header = {
USER_AGENT:
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12;"
" rv:53.0) Gecko/20100101 Firefox/53.0",
ACCEPT: "application/json, text/javascript, */*; q=0.01",
            ACCEPT_LANGUAGE: "en-US,en;q=0.5",
ACCEPT_ENCODING: "gzip, deflate",
CONTENT_TYPE: "application/x-www-form-urlencoded; charset=UTF-8",
HTTP_HEADER_X_REQUESTED_WITH: "XMLHttpRequest",
REFERER: "http://{}/".format(self.host),
CONNECTION: KEEP_ALIVE,
PRAGMA: HTTP_HEADER_NO_CACHE,
CACHE_CONTROL: HTTP_HEADER_NO_CACHE,
}
password_md5 = hashlib.md5(
self.password.encode('utf')).hexdigest().upper()
# Create a session to handle cookie easier
session = requests.session()
session.get(base_url, headers=header)
login_data = {"username": self.username, "password": password_md5}
session.post(base_url, login_data, headers=header)
# A timestamp is required to be sent as get parameter
timestamp = int(datetime.now().timestamp() * 1e3)
client_list_url = '{}/data/monitor.client.client.json'.format(
base_url)
get_params = {
'operation': 'load',
'_': timestamp,
}
response = session.get(
client_list_url, headers=header, params=get_params)
session.close()
try:
list_of_devices = response.json()
except ValueError:
_LOGGER.error("AP didn't respond with JSON. "
"Check if credentials are correct")
return False
if list_of_devices:
self.last_results = {
device['MAC'].replace('-', ':'): device['DeviceName']
for device in list_of_devices['data']
}
return True
return False
| PetePriority/home-assistant | homeassistant/components/device_tracker/tplink.py | Python | apache-2.0 | 16,061 |
"""Support for the cloud for text to speech service."""
from hass_nabucasa import Cloud
from hass_nabucasa.voice import MAP_VOICE, VoiceError
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from .const import DOMAIN
CONF_GENDER = "gender"
SUPPORT_LANGUAGES = list({key[0] for key in MAP_VOICE})
def validate_lang(value):
"""Validate chosen gender or language."""
lang = value.get(CONF_LANG)
if lang is None:
return value
gender = value.get(CONF_GENDER)
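    # If no gender was given, derive it from the first MAP_VOICE entry
    # that matches the chosen language.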
if gender is None:
gender = value[CONF_GENDER] = next(
(chk_gender for chk_lang, chk_gender in MAP_VOICE if chk_lang == lang), None
)
if (lang, gender) not in MAP_VOICE:
raise vol.Invalid("Unsupported language and gender specified.")
return value
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LANG): str,
vol.Optional(CONF_GENDER): str,
}
),
validate_lang,
)
async def async_get_engine(hass, config, discovery_info=None):
"""Set up Cloud speech component."""
cloud: Cloud = hass.data[DOMAIN]
if discovery_info is not None:
language = None
gender = None
else:
language = config[CONF_LANG]
gender = config[CONF_GENDER]
return CloudProvider(cloud, language, gender)
class CloudProvider(Provider):
"""NabuCasa Cloud speech API provider."""
def __init__(self, cloud: Cloud, language: str, gender: str) -> None:
"""Initialize cloud provider."""
self.cloud = cloud
self.name = "Cloud"
self._language = language
self._gender = gender
if self._language is not None:
return
self._language, self._gender = cloud.client.prefs.tts_default_voice
cloud.client.prefs.async_listen_updates(self._sync_prefs)
async def _sync_prefs(self, prefs):
"""Sync preferences."""
self._language, self._gender = prefs.tts_default_voice
@property
def default_language(self):
"""Return the default language."""
return self._language
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_options(self):
"""Return list of supported options like voice, emotion."""
return [CONF_GENDER]
@property
def default_options(self):
"""Return a dict include default options."""
return {CONF_GENDER: self._gender}
async def async_get_tts_audio(self, message, language, options=None):
"""Load TTS from NabuCasa Cloud."""
# Process TTS
try:
data = await self.cloud.voice.process_tts(
message, language, gender=options[CONF_GENDER]
)
except VoiceError:
return (None, None)
return ("mp3", data)
| kennedyshead/home-assistant | homeassistant/components/cloud/tts.py | Python | apache-2.0 | 2,957 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.oslogin_v1.proto import (
common_pb2 as google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2,
)
from google.cloud.oslogin_v1.proto import (
oslogin_pb2 as google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class OsLoginServiceStub(object):
"""Cloud OS Login API
The Cloud OS Login API allows you to manage users and their associated SSH
public keys for logging into virtual machines on Google Cloud Platform.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.DeletePosixAccount = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/DeletePosixAccount",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeletePosixAccountRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/DeleteSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeleteSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetLoginProfile = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/GetLoginProfile",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.LoginProfile.FromString,
)
self.GetSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/GetSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString,
)
self.ImportSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/ImportSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyResponse.FromString,
)
self.UpdateSshPublicKey = channel.unary_unary(
"/google.cloud.oslogin.v1.OsLoginService/UpdateSshPublicKey",
request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.UpdateSshPublicKeyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString,
)
class OsLoginServiceServicer(object):
"""Cloud OS Login API
The Cloud OS Login API allows you to manage users and their associated SSH
public keys for logging into virtual machines on Google Cloud Platform.
"""
def DeletePosixAccount(self, request, context):
"""Deletes a POSIX account.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteSshPublicKey(self, request, context):
"""Deletes an SSH public key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetLoginProfile(self, request, context):
"""Retrieves the profile information used for logging in to a virtual machine
on Google Compute Engine.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetSshPublicKey(self, request, context):
"""Retrieves an SSH public key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ImportSshPublicKey(self, request, context):
"""Adds an SSH public key and returns the profile information. Default POSIX
account information is set when no username and UID exist as part of the
login profile.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateSshPublicKey(self, request, context):
"""Updates an SSH public key and returns the profile information. This method
supports patch semantics.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_OsLoginServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"DeletePosixAccount": grpc.unary_unary_rpc_method_handler(
servicer.DeletePosixAccount,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeletePosixAccountRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"DeleteSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.DeleteSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeleteSshPublicKeyRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetLoginProfile": grpc.unary_unary_rpc_method_handler(
servicer.GetLoginProfile,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.LoginProfile.SerializeToString,
),
"GetSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.GetSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetSshPublicKeyRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.SerializeToString,
),
"ImportSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.ImportSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyResponse.SerializeToString,
),
"UpdateSshPublicKey": grpc.unary_unary_rpc_method_handler(
servicer.UpdateSshPublicKey,
request_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.UpdateSshPublicKeyRequest.FromString,
response_serializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.oslogin.v1.OsLoginService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
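# Illustrative sketch, not part of the generated stubs: calling the service
# through the client stub. The endpoint, the insecure channel and the
# helper name are assumptions; a real deployment needs an authenticated
# secure channel to the Google API endpoint.
def _get_login_profile_demo(user_email):
    channel = grpc.insecure_channel("localhost:50051")
    stub = OsLoginServiceStub(channel)
    request = google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest(
        name="users/%s" % user_email
    )
    return stub.GetLoginProfile(request)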
| dhermes/gcloud-python | oslogin/google/cloud/oslogin_v1/proto/oslogin_pb2_grpc.py | Python | apache-2.0 | 7,621 |
import distutils.version
try:
import greenlet
getcurrent = greenlet.greenlet.getcurrent
GreenletExit = greenlet.greenlet.GreenletExit
preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__)
>= distutils.version.LooseVersion('0.3.2'))
greenlet = greenlet.greenlet
except ImportError, e:
    # greenlet is unavailable; fall back to alternative implementations
    try:
        from py.magic import greenlet
        getcurrent = greenlet.getcurrent
        GreenletExit = greenlet.GreenletExit
        preserves_excinfo = False
    except ImportError:
        try:
            from stackless import greenlet
            getcurrent = greenlet.getcurrent
            GreenletExit = greenlet.GreenletExit
            preserves_excinfo = False
        except ImportError:
            try:
                from support.stacklesss import greenlet, getcurrent, GreenletExit
                preserves_excinfo = False
                (greenlet, getcurrent, GreenletExit) # silence pyflakes
            except ImportError, e:
                raise ImportError("Unable to find an implementation of greenlet.")
| ioram7/keystone-federado-pgid2013 | build/eventlet/eventlet/support/greenlets.py | Python | apache-2.0 | 1,085 |
# dyndlg.py
# contributed by Curt Hagenlocher <chi@earthlink.net>
# Dialog Template params:
# Parameter 0 - Window caption
# Parameter 1 - Bounds (rect tuple)
# Parameter 2 - Window style
# Parameter 3 - Extended style
# Parameter 4 - Font tuple
# Parameter 5 - Menu name
# Parameter 6 - Window class
# Dialog item params:
# Parameter 0 - Window class
# Parameter 1 - Text
# Parameter 2 - ID
# Parameter 3 - Bounds
# Parameter 4 - Style
# Parameter 5 - Extended style
# Parameter 6 - Extra data
import win32ui
import win32con
from pywin.mfc import dialog, window
def MakeDlgTemplate():
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
dlg = [ ["Select Warehouse", (0, 0, 177, 93), style, None, (8, "MS Sans Serif")], ]
dlg.append([130, "Current Warehouse:", -1, (7, 7, 69, 9), cs | win32con.SS_LEFT])
dlg.append([130, "ASTORIA", 128, (16, 17, 99, 7), cs | win32con.SS_LEFT])
dlg.append([130, "New &Warehouse:", -1, (7, 29, 69, 9), cs | win32con.SS_LEFT])
s = win32con.WS_TABSTOP | cs
# dlg.append([131, None, 130, (5, 40, 110, 48),
# s | win32con.LBS_NOTIFY | win32con.LBS_SORT | win32con.LBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL | win32con.WS_BORDER])
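    # Instead of the plain listbox commented out above, the live entry
    # embeds an ActiveX control identified by its CLSID.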
dlg.append(["{8E27C92B-1264-101C-8A2F-040224009C02}", None, 131, (5, 40, 110, 48),win32con.WS_TABSTOP])
dlg.append([128, "OK", win32con.IDOK, (124, 5, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Cancel", win32con.IDCANCEL, (124, 22, 50, 14), s])
dlg.append([128, "&Help", 100, (124, 74, 50, 14), s])
return dlg
def test1():
win32ui.CreateDialogIndirect( MakeDlgTemplate() ).DoModal()
def test2():
dialog.Dialog( MakeDlgTemplate() ).DoModal()
def test3():
dlg = win32ui.LoadDialogResource(win32ui.IDD_SET_TABSTOPS)
dlg[0][0] = 'New Dialog Title'
dlg[0][1] = (80, 20, 161, 60)
dlg[1][1] = '&Confusion:'
cs = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_TABSTOP | win32con.BS_PUSHBUTTON
dlg.append([128, "&Help", 100, (111, 41, 40, 14), cs])
dialog.Dialog( dlg ).DoModal()
def test4():
page1=dialog.PropertyPage(win32ui.LoadDialogResource(win32ui.IDD_PROPDEMO1))
page2=dialog.PropertyPage(win32ui.LoadDialogResource(win32ui.IDD_PROPDEMO2))
ps=dialog.PropertySheet('Property Sheet/Page Demo', None, [page1, page2])
ps.DoModal()
def testall():
test1()
test2()
test3()
test4()
if __name__=='__main__':
    testall()
| zhanqxun/cv_fish | pythonwin/pywin/Demos/dyndlg.py | Python | apache-2.0 | 2,568 |
"""Config flow for the Daikin platform."""
import asyncio
import logging
from uuid import uuid4
from aiohttp import ClientError, web_exceptions
from async_timeout import timeout
from pydaikin.daikin_base import Appliance
from pydaikin.discovery import Discovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PASSWORD
from .const import CONF_UUID, DOMAIN, KEY_MAC, TIMEOUT
_LOGGER = logging.getLogger(__name__)
class FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize the Daikin config flow."""
self.host = None
@property
def schema(self):
"""Return current schema."""
return vol.Schema(
{
vol.Required(CONF_HOST, default=self.host): str,
vol.Optional(CONF_API_KEY): str,
vol.Optional(CONF_PASSWORD): str,
}
)
async def _create_entry(self, host, mac, key=None, uuid=None, password=None):
"""Register new entry."""
if not self.unique_id:
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=host,
data={
CONF_HOST: host,
KEY_MAC: mac,
CONF_API_KEY: key,
CONF_UUID: uuid,
CONF_PASSWORD: password,
},
)
async def _create_device(self, host, key=None, password=None):
"""Create device."""
        # BRP07Cxx devices need a UUID together with the key
if key:
uuid = str(uuid4())
else:
uuid = None
key = None
if not password:
password = None
try:
with timeout(TIMEOUT):
device = await Appliance.factory(
host,
self.hass.helpers.aiohttp_client.async_get_clientsession(),
key=key,
uuid=uuid,
password=password,
)
except asyncio.TimeoutError:
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "cannot_connect"},
)
except web_exceptions.HTTPForbidden:
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "invalid_auth"},
)
except ClientError:
_LOGGER.exception("ClientError")
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "unknown"},
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error creating device")
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "unknown"},
)
mac = device.mac
return await self._create_entry(host, mac, key, uuid, password)
async def async_step_user(self, user_input=None):
"""User initiated config flow."""
if user_input is None:
return self.async_show_form(step_id="user", data_schema=self.schema)
return await self._create_device(
user_input[CONF_HOST],
user_input.get(CONF_API_KEY),
user_input.get(CONF_PASSWORD),
)
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered Daikin device."""
_LOGGER.debug("Zeroconf user_input: %s", discovery_info)
devices = Discovery().poll(ip=discovery_info[CONF_HOST])
if not devices:
_LOGGER.debug(
"Could not find MAC-address for %s,"
" make sure the required UDP ports are open (see integration documentation)",
discovery_info[CONF_HOST],
)
return self.async_abort(reason="cannot_connect")
await self.async_set_unique_id(next(iter(devices))[KEY_MAC])
self._abort_if_unique_id_configured()
self.host = discovery_info[CONF_HOST]
return await self.async_step_user()
| sander76/home-assistant | homeassistant/components/daikin/config_flow.py | Python | apache-2.0 | 4,406 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova.db import api as db
from nova.objects import base
from nova.objects import fields
@base.NovaObjectRegistry.register
class VirtCPUModel(base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'arch': fields.ArchitectureField(nullable=True),
'vendor': fields.StringField(nullable=True),
'topology': fields.ObjectField('VirtCPUTopology',
nullable=True),
'features': fields.ListOfObjectsField("VirtCPUFeature",
default=[]),
'mode': fields.CPUModeField(nullable=True),
'model': fields.StringField(nullable=True),
'match': fields.CPUMatchField(nullable=True),
}
def obj_load_attr(self, attrname):
setattr(self, attrname, None)
def to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
@classmethod
def from_json(cls, jsonstr):
return cls.obj_from_primitive(jsonutils.loads(jsonstr))
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['vcpu_model'])
if not db_extra or not db_extra['vcpu_model']:
return None
return cls.obj_from_primitive(jsonutils.loads(db_extra['vcpu_model']))
@base.NovaObjectRegistry.register
class VirtCPUFeature(base.NovaObject):
VERSION = '1.0'
fields = {
'policy': fields.CPUFeaturePolicyField(nullable=True),
'name': fields.StringField(nullable=False),
}
def obj_load_attr(self, attrname):
setattr(self, attrname, None)
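# Illustrative sketch, not part of the original module: round-tripping a
# VirtCPUModel through its JSON form. The helper name and field values
# are assumptions chosen from the valid enum values.
def _vcpu_model_roundtrip_demo():
    model = VirtCPUModel(arch='x86_64', vendor='Intel', mode='custom',
                         model='Haswell', match='exact')
    return VirtCPUModel.from_json(model.to_json())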
| rahulunair/nova | nova/objects/vcpu_model.py | Python | apache-2.0 | 2,317 |
"""
This module defines tensors with abstract index notation.
The abstract index notation has been first formalized by Penrose.
Tensor indices are formal objects, with a tensor type; there is no
notion of index range, it is only possible to assign the dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices, internally they have no name,
the indices being represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is a (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
"""
from __future__ import print_function, division
from collections import defaultdict
from sympy import Matrix, Rational
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \
bsgs_direct_product, canonicalize, riemann_bsgs
from sympy.core import Basic, sympify, Add, S
from sympy.core.compatibility import string_types, reduce, range
from sympy.core.containers import Tuple
from sympy.core.decorators import deprecated
from sympy.core.symbol import Symbol, symbols
from sympy.core.sympify import CantSympify
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices import eye
class TIDS(CantSympify):
"""
Tensor-index data structure. This contains internal data structures about
components of a tensor expression, its free and dummy indices.
    To create a ``TIDS`` object via the standard constructor, the required
    arguments are the components and the free and dummy indices described
    under ``Parameters`` below.
WARNING: this class is meant as an internal representation of tensor data
structures and should not be directly accessed by end users.
Parameters
==========
components : ``TensorHead`` objects representing the components of the tensor expression.
free : Free indices in their internal representation.
dum : Dummy indices in their internal representation.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS([T], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
Notes
=====
In short, this has created the components, free and dummy indices for
the internal representation of a tensor T(m0, m1, -m1, m3).
Free indices are represented as a list of triplets. The elements of
each triplet identify a single free index and are
1. TensorIndex object
2. position inside the component
3. component number
Dummy indices are represented as a list of 4-plets. Each 4-plet stands
for couple for contracted indices, their original TensorIndex is not
stored as it is no longer required. The four elements of the 4-plet
are
1. position inside the component of the first index.
2. position inside the component of the second index.
3. component number of the first index.
4. component number of the second index.
"""
def __init__(self, components, free, dum):
self.components = components
self.free = free
self.dum = dum
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: (x[2], x[0]))
def get_tensors(self):
"""
Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied
by one another.
"""
indices = self.get_indices()
components = self.components
tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def get_components_with_free_indices(self):
"""
Get a list of components with their associated indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> t = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> t.get_components_with_free_indices()
[(T(Lorentz,Lorentz,Lorentz,Lorentz), [(m0, 0, 0), (m3, 3, 0)])]
>>> t2 = (A(m0)*A(-m0))._tids
>>> t2.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [])]
>>> t3 = (A(m0)*A(-m1)*A(-m0)*A(m1))._tids
>>> t3.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), [])]
>>> t4 = (A(m0)*A(m1)*A(-m0))._tids
>>> t4.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [])]
>>> t5 = (A(m0)*A(m1)*A(m2))._tids
>>> t5.get_components_with_free_indices()
[(A(Lorentz), [(m0, 0, 0)]), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [(m2, 0, 2)])]
"""
components = self.components
ret_comp = []
free_counter = 0
if len(self.free) == 0:
return [(comp, []) for comp in components]
for i, comp in enumerate(components):
c_free = []
while free_counter < len(self.free):
if not self.free[free_counter][2] == i:
break
c_free.append(self.free[free_counter])
free_counter += 1
if free_counter >= len(self.free):
break
ret_comp.append((comp, c_free))
return ret_comp
@staticmethod
def from_components_and_indices(components, indices):
"""
Create a new ``TIDS`` object from ``components`` and ``indices``
``components`` ``TensorHead`` objects representing the components
of the tensor expression.
``indices`` ``TensorIndex`` objects, the indices. Contractions are
detected upon construction.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
In case of many components the same indices have slightly different
indexes:
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> TIDS.from_components_and_indices([A]*4, [m0, m1, -m1, m3])
TIDS([A(Lorentz), A(Lorentz), A(Lorentz), A(Lorentz)], [(m0, 0, 0), (m3, 0, 3)], [(0, 0, 1, 2)])
"""
tids = None
cur_pos = 0
for i in components:
tids_sing = TIDS([i], *TIDS.free_dum_from_indices(*indices[cur_pos:cur_pos+i.rank]))
if tids is None:
tids = tids_sing
else:
tids *= tids_sing
cur_pos += i.rank
if tids is None:
tids = TIDS([], [], [])
tids.free.sort(key=lambda x: x[0].name)
tids.dum.sort()
return tids
@deprecated(useinstead="get_indices")
def to_indices(self):
return self.get_indices()
@staticmethod
def free_dum_from_indices(*indices):
"""
Convert ``indices`` into ``free``, ``dum`` for single component tensor
``free`` list of tuples ``(index, pos, 0)``,
where ``pos`` is the position of index in
the list of indices formed by the component tensors
``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> TIDS.free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0, 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index._name
typ = index._tensortype
contr = index._is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i))
else:
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i))
if contr:
dum.append((i, pos, 0, 0))
else:
dum.append((pos, i, 0, 0))
else:
index_dict[(name, typ)] = index._is_up, i
free = [(index, i, 0) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
@staticmethod
def _check_matrix_indices(f_free, g_free, nc1):
# This "private" method checks matrix indices.
# Matrix indices are special as there are only two, and observe
# anomalous substitution rules to determine contractions.
dum = []
# make sure that free indices appear in the same order as in their component:
f_free.sort(key=lambda x: (x[2], x[1]))
g_free.sort(key=lambda x: (x[2], x[1]))
matrix_indices_storage = {}
transform_right_to_left = {}
f_pop_pos = []
g_pop_pos = []
for free_pos, (ind, i, c) in enumerate(f_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
matrix_indices_storage[ind] = (free_pos, i, c)
for free_pos, (ind, i, c) in enumerate(g_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
if ind == index_type.auto_left:
if -index_type.auto_right in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(-index_type.auto_right)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
continue
if ind in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(ind)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
transform_right_to_left[-index_type.auto_right] = c
continue
if ind in transform_right_to_left:
other_c = transform_right_to_left.pop(ind)
if c == other_c:
g_free[free_pos] = (index_type.auto_left, i, c)
for i in reversed(sorted(f_pop_pos)):
f_free.pop(i)
for i in reversed(sorted(g_pop_pos)):
g_free.pop(i)
return dum
@staticmethod
def mul(f, g):
"""
The algorithms performing the multiplication of two ``TIDS`` instances.
In short, it forms a new ``TIDS`` object, joining components and indices,
checking that abstract indices are compatible, and possibly contracting
them.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TIDS, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> tids_1 = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> tids_2 = TIDS.from_components_and_indices([A], [m2])
>>> tids_1 * tids_2
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],\
[(m0, 0, 0), (m3, 3, 0), (m2, 0, 1)], [(1, 2, 0, 0)])
In this case no contraction has been performed.
>>> tids_3 = TIDS.from_components_and_indices([A], [-m3])
>>> tids_1 * tids_3
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],\
[(m0, 0, 0)], [(1, 2, 0, 0), (3, 0, 0, 1)])
Free indices ``m3`` and ``-m3`` are identified as a contracted couple, and are
therefore transformed into dummy indices.
A wrong index construction (for example, trying to contract two
contravariant indices or using indices multiple times) would result in
an exception:
>>> tids_4 = TIDS.from_components_and_indices([A], [m3])
>>> # This raises an exception:
>>> # tids_1 * tids_4
"""
index_up = lambda u: u if u.is_up else -u
        # this lambda returns True if the index is not a matrix index:
notmat = lambda i: i not in (i._tensortype.auto_left, -i._tensortype.auto_right)
f_free = f.free[:]
g_free = g.free[:]
nc1 = len(f.components)
dum = TIDS._check_matrix_indices(f_free, g_free, nc1)
# find out which free indices of f and g are contracted
free_dict1 = dict([(i if i.is_up else -i, (pos, cpos, i)) for i, pos, cpos in f_free])
free_dict2 = dict([(i if i.is_up else -i, (pos, cpos, i)) for i, pos, cpos in g_free])
free_names = set(free_dict1.keys()) & set(free_dict2.keys())
# find the new `free` and `dum`
dum2 = [(i1, i2, c1 + nc1, c2 + nc1) for i1, i2, c1, c2 in g.dum]
free1 = [(ind, i, c) for ind, i, c in f_free if index_up(ind) not in free_names]
free2 = [(ind, i, c + nc1) for ind, i, c in g_free if index_up(ind) not in free_names]
free = free1 + free2
dum.extend(f.dum + dum2)
for name in free_names:
ipos1, cpos1, ind1 = free_dict1[name]
ipos2, cpos2, ind2 = free_dict2[name]
cpos2 += nc1
if ind1._is_up == ind2._is_up:
raise ValueError('wrong index construction {0}'.format(ind1))
if ind1._is_up:
new_dummy = (ipos1, ipos2, cpos1, cpos2)
else:
new_dummy = (ipos2, ipos1, cpos2, cpos1)
dum.append(new_dummy)
return (f.components + g.components, free, dum)
def __mul__(self, other):
return TIDS(*self.mul(self, other))
def __str__(self):
return "TIDS({0}, {1}, {2})".format(self.components, self.free, self.dum)
def __repr__(self):
return self.__str__()
def sorted_components(self):
"""
Returns a ``TIDS`` with sorted components
The sorting is done taking into account the commutation group
of the component tensors.
"""
from sympy.combinatorics.permutations import _af_invert
cv = list(zip(self.components, range(len(self.components))))
sign = 1
n = len(cv) - 1
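        # bubble-sort the components by (index types, name); each swap of
        # two anticommuting tensors flips the overall sign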
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1][0].commutes_with(cv[j][0])
if c not in [0, 1]:
continue
if (cv[j-1][0]._types, cv[j-1][0]._name) > \
(cv[j][0]._types, cv[j][0]._name):
cv[j-1], cv[j] = cv[j], cv[j-1]
if c:
sign = -sign
# perm_inv[new_pos] = old_pos
components = [x[0] for x in cv]
perm_inv = [x[1] for x in cv]
perm = _af_invert(perm_inv)
free = [(ind, i, perm[c]) for ind, i, c in self.free]
free.sort()
dum = [(i1, i2, perm[c1], perm[c2]) for i1, i2, c1, c2 in self.dum]
dum.sort(key=lambda x: components[x[2]].index_types[x[0]])
return TIDS(components, free, dum), sign
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: (x[2], x[0]))
def canon_args(self):
"""
Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize``
see ``canonicalize`` in ``tensor_can.py``
"""
# to be called after sorted_components
from sympy.combinatorics.permutations import _af_new
# types = list(set(self._types))
# types.sort(key = lambda x: x._name)
n = self._ext_rank
g = [None]*n + [n, n+1]
pos = 0
vpos = []
components = self.components
for t in components:
vpos.append(pos)
pos += t._rank
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (indx, ipos, cpos) in enumerate(self._get_sorted_free_indices_for_canon()):
pos = vpos[cpos] + ipos
g[pos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2, cpos1, cpos2 in self._get_sorted_dum_indices_for_canon():
pos1 = vpos[cpos1] + ipos1
pos2 = vpos[cpos2] + ipos2
g[pos1] = j
g[pos2] = j + 1
j += 2
typ = components[cpos1].index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(typ.metric_antisym)
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h._comm == 0 or h._comm == 1:
comm = h._comm
else:
comm = TensorManager.get_comm(h._comm, h._comm)
v.append((h._symmetry.base, h._symmetry.generators, n, comm))
return _af_new(g), dummies, msym, v
def perm2tensor(self, g, canon_bp=False):
"""
Returns a ``TIDS`` instance corresponding to the permutation ``g``
``g`` : permutation corresponding to the tensor in the representation
used in canonicalization.
``canon_bp`` : if True, ``g`` is the permutation
corresponding to the canonical form of the tensor.
"""
vpos = []
components = self.components
pos = 0
for t in components:
vpos.append(pos)
pos += t._rank
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*4 for i in range((rank - nfree)//2)]
free = []
icomp = -1
for i in range(rank):
if i in vpos:
icomp += vpos.count(i)
pos0 = i
ipos = i - pos0
gi = g[i]
if gi < nfree:
ind = sorted_free[gi]
free.append((ind, ipos, icomp))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = ipos
dum[idum][3] = icomp
else:
dum[idum][0] = ipos
dum[idum][2] = icomp
dum = [tuple(x) for x in dum]
return TIDS(components, free, dum)
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
components = self.components
free = self.free
dum = self.dum
indices = [None]*self._ext_rank
start = 0
pos = 0
vpos = []
for t in components:
vpos.append(pos)
pos += t.rank
cdt = defaultdict(int)
# if the free indices have names with dummy_fmt, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos, cpos in free:
if indx._name.split('_')[0] == indx._tensortype._dummy_fmt[:-3]:
cdt[indx._tensortype] = max(cdt[indx._tensortype], int(indx._name.split('_')[1]) + 1)
start = vpos[cpos]
indices[start + ipos] = indx
for ipos1, ipos2, cpos1, cpos2 in dum:
start1 = vpos[cpos1]
start2 = vpos[cpos2]
typ1 = components[cpos1].index_types[ipos1]
assert typ1 == components[cpos2].index_types[ipos2]
fmt = typ1._dummy_fmt
nd = cdt[typ1]
indices[start1 + ipos1] = TensorIndex(fmt % nd, typ1)
indices[start2 + ipos2] = TensorIndex(fmt % nd, typ1, False)
cdt[typ1] += 1
return indices
def contract_metric(self, g):
"""
Returns new TIDS and sign.
Sign is either 1 or -1, to correct the sign after metric contraction
(for spinor indices).
"""
components = self.components
antisym = g.index_types[0].metric_antisym
#if not any(x == g for x in components):
# return self
# list of positions of the metric ``g``
gpos = [i for i, x in enumerate(components) if x == g]
if not gpos:
return self, 1
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if x[-1] == gposx]
dum1 = [x for x in dum if x[-2] == gposx or x[-1] == gposx]
if not dum1:
continue
elim.add(gposx)
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
else:
p1 = dp0
c1 = dc0
ind, p, c = free1[0]
free.append((ind, p1, c1))
else:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
c1 = dc0
ind, p, c = free1[0]
free.append((ind, p1, c1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
shift = 0
shifts = [0]*len(components)
for i in range(len(components)):
if i in elim:
shift += 1
continue
shifts[i] = shift
free = [(ind, p, c - shifts[c]) for (ind, p, c) in free if c not in elim]
dum = [(p0, p1, c0 - shifts[c0], c1 - shifts[c1]) for i, (p0, p1, c0, c1) in enumerate(dum) if c0 not in elim and c1 not in elim]
components = [c for i, c in enumerate(components) if i not in elim]
tids = TIDS(components, free, dum)
return tids, sign
class VTIDS(TIDS):
"""
DEPRECATED: DO NOT USE.
"""
@deprecated(useinstead="TIDS")
def __init__(self, components, free, dum, data):
super(VTIDS, self).__init__(components, free, dum)
self.data = data
@staticmethod
@deprecated(useinstead="TIDS")
def parse_data(data):
"""
DEPRECATED: DO NOT USE.
"""
return _TensorDataLazyEvaluator.parse_data(data)
@deprecated(useinstead="TIDS")
def correct_signature_from_indices(self, data, indices, free, dum):
"""
DEPRECATED: DO NOT USE.
"""
return _TensorDataLazyEvaluator._correct_signature_from_indices(data, indices, free, dum)
@staticmethod
@deprecated(useinstead="TIDS")
def flip_index_by_metric(data, metric, pos):
"""
DEPRECATED: DO NOT USE.
"""
return _TensorDataLazyEvaluator._flip_index_by_metric(data, metric, pos)
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of SymPy.
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
expressions, and are stored inside this class as a mapping between the tensor
expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict = dict()
_substitutions_dict_tensmul = dict()
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return None
numpy = import_module("numpy")
if not isinstance(dat, numpy.ndarray):
return dat
if dat.ndim == 0:
return dat[()]
elif dat.ndim == 1 and dat.size == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
Metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return None
if isinstance(key, Tensor):
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in key.get_indices()])
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
return self.data_tensmul_from_tensorhead(key, key.component)
if isinstance(key, TensMul):
tensmul_list = key.split()
if len(tensmul_list) == 1 and len(tensmul_list[0].components) == 1:
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple([i.is_up for i in tensmul_list[0].get_indices()])
srch = (tensmul_list[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
data_list = [self.data_tensmul_from_tensorhead(i, i.components[0]) for i in tensmul_list]
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
data_result, tensmul_result = self.data_product_tensors(data_list, tensmul_list)
return data_result
if isinstance(key, TensAdd):
sumvar = S.Zero
data_list = [i.data for i in key.args]
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
for i in data_list:
sumvar += i
return sumvar
return None
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object; it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is None:
return None
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_tensmul_from_tensorhead(self, tensmul, tensorhead):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
if tensorhead.data is None:
return None
return self._correct_signature_from_indices(
tensorhead.data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum)
def data_product_tensors(self, data_list, tensmul_list):
"""
Given a ``data_list``, list of ``ndarray``'s and a ``tensmul_list``,
list of ``TensMul`` instances, compute the resulting ``ndarray``,
after tensor products and contractions.
"""
def data_mul(f, g):
"""
Multiplies two ``ndarray`` objects: it first calls ``TIDS.mul``,
then checks which indices have been contracted, and finally performs
the contraction operation on the data, according to the contracted indices.
"""
data1, tensmul1 = f
data2, tensmul2 = g
components, free, dum = TIDS.mul(tensmul1, tensmul2)
data = _TensorDataLazyEvaluator._contract_ndarray(tensmul1.free, tensmul2.free, data1, data2)
# TODO: do this more efficiently... maybe by just passing an index list
# to .data_product_tensor(...)
return data, TensMul.from_TIDS(S.One, TIDS(components, free, dum))
return reduce(data_mul, zip(data_list, tensmul_list))
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
import numpy
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, list(range(rank))))
# the order of a permutation is the number of times it must be
# applied to obtain the identity.
for i in range(gener.order()-1):
data_swapped = numpy.transpose(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if (last_data - sign_change*data_swapped).any():
raise ValueError("Component data symmetry structure error")
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError("index type {} has no components data"\
" associated (needed to raise/lower index)".format(indextype))
if indextype.dim is None:
continue
if dim != indextype.dim:
raise ValueError("wrong dimension of ndarray")
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
@staticmethod
def _contract_ndarray(free1, free2, ndarray1, ndarray2):
numpy = import_module('numpy')
def ikey(x):
return x[1:]
free1 = free1[:]
free2 = free2[:]
free1.sort(key=ikey)
free2.sort(key=ikey)
self_free = [_[0] for _ in free1]
axes1 = []
axes2 = []
for jpos, jindex in enumerate(free2):
if -jindex[0] in self_free:
nidx = self_free.index(-jindex[0])
else:
continue
axes1.append(nidx)
axes2.append(jpos)
contracted_ndarray = numpy.tensordot(
ndarray1,
ndarray2,
(axes1, axes2)
)
return contracted_ndarray
@staticmethod
def add_tensor_mul(prod, f, g):
def mul_function():
return _TensorDataLazyEvaluator._contract_ndarray(f.free, g.free, f.data, g.data)
_TensorDataLazyEvaluator._substitutions_dict[prod] = mul_function()
@staticmethod
def add_tensor_add(addition, f, g):
def add_function():
return f.data + g.data
_TensorDataLazyEvaluator._substitutions_dict[addition] = add_function()
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
A fully covariant metric is the inverse transpose of the fully
contravariant metric (matrix inverse is meant). If the metric is
symmetric, the transpose is not necessary and mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
# in symmetric spaces, the transpose is the same as the original matrix,
# the full covariant metric tensor is the inverse transpose, so this
# code will be able to handle non-symmetric metrics.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = Matrix(data)
invt = Matrix(inverse_transpose)
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
numpy = import_module('numpy')
data = numpy.tensordot(
metric,
data,
(1, pos))
return numpy.rollaxis(data, 0, pos+1)
@staticmethod
def inverse_matrix(ndarray):
m = Matrix(ndarray).inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = Matrix(ndarray).inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
numpy = import_module('numpy')
# change the ndarray values according to the covariance/contravariance of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx._tensortype.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx._tensortype.data),
i
)
if len(dum) > 0:
### perform contractions ###
axes1 = []
axes2 = []
for i, indx1 in enumerate(indices):
try:
nd = indices[:i].index(-indx1)
except ValueError:
continue
axes1.append(nd)
axes2.append(i)
for ax1, ax2 in zip(axes1, axes2):
data = numpy.trace(data, axis1=ax1, axis2=ax2)
return data
@staticmethod
def _sort_data_axes(old, new):
numpy = import_module('numpy')
new_data = old.data.copy()
old_free = [i[0] for i in old.free]
new_free = [i[0] for i in new.free]
for i in range(len(new_free)):
for j in range(i, len(old_free)):
if old_free[j] == new_free[i]:
old_free[i], old_free[j] = old_free[j], old_free[i]
new_data = numpy.swapaxes(new_data, i, j)
break
return new_data
@staticmethod
def add_rearrange_tensmul_parts(new_tensmul, old_tensmul):
def sorted_compo():
return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul)
_TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo()
@staticmethod
@doctest_depends_on(modules=('numpy',))
def parse_data(data):
"""
Transform ``data`` to a numpy ndarray. The parameter ``data`` may
contain data in various formats, e.g. nested lists, sympy ``Matrix``,
and so on.
Examples
========
>>> from sympy.tensor.tensor import _TensorDataLazyEvaluator
>>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])
[1 3 -6 12]
>>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])
[[1 2]
[4 7]]
"""
numpy = import_module('numpy')
if (numpy is not None) and (not isinstance(data, numpy.ndarray)):
if len(data) == 2 and hasattr(data[0], '__call__'):
def fromfunction_sympify(*x):
return sympify(data[0](*x))
data = numpy.fromfunction(fromfunction_sympify, data[1])
else:
vsympify = numpy.vectorize(sympify)
data = vsympify(numpy.array(data))
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager(object):
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
``2`` tensors not commuting, apart from those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
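For instance, the predefined labels can be queried directly; a small
illustrative check of the table above:
>>> from sympy.tensor.tensor import TensorManager
>>> TensorManager.get_comm(0, 2)
0
>>> TensorManager.get_comm(2, 2) is None
True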
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0:0, 1:1, 2:2}
self._comm_i2symbol = {0:0, 1:1, 2:2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
Get the commutation group number corresponding to ``i``.
``i`` can be a symbol, a number or a string.
If ``i`` is not already defined, its commutation group number
is set.
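Examples
========
A minimal illustration with a predefined label:
>>> from sympy.tensor.tensor import TensorManager
>>> TensorManager.comm_symbols2i(1)
1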
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""
Returns the symbol corresponding to the commutation group number.
"""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
set the commutation parameter ``c`` for commutation groups ``i, j``
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0, 1`` and ``2`` which are reserved respectively
for commuting, anticommuting tensors and tensors not commuting
with any other group apart from the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; ``A`` is commuting.
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, TensorManager
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> G = tensorhead('G', [Lorentz], [[1]], 'Gcomm')
>>> GH = tensorhead('GH', [Lorentz], [[1]], 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
set the commutation group numbers ``c`` for symbols ``i, j``
Parameters
==========
args : sequence of ``(i, j, c)``
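Examples
========
A sketch of usage; ``'Gcomm1'`` and ``'Gcomm2'`` below are made-up group
labels used only for illustration:
>>> from sympy.tensor.tensor import TensorManager
>>> TensorManager.set_comms(('Gcomm1', 'Gcomm2', 0), ('Gcomm1', 'Gcomm1', 1))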
"""
for i, j, c in args:
self.set_comm(i, j, c)
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
def clear(self):
"""
Clear the TensorManager.
"""
self._comm_init()
TensorManager = _TensorManager()
@doctest_depends_on(modules=('numpy',))
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
metric : metric symmetry or metric object or ``None``
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
dummy_fmt : name of the head of dummy indices
Attributes
==========
``name``
``metric_name`` : it is 'metric' or metric.name
``metric_antisym``
``metric`` : the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``dim``
``eps_dim``
``dummy_fmt``
``data`` : a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The ``metric`` parameter can be:
``metric = False`` symmetric metric (in Riemannian geometry)
``metric = True`` antisymmetric metric (for spinor calculus)
``metric = None`` there is no metric
``metric`` can be an object having ``name`` and ``antisym`` attributes.
If there is a metric the metric is used to raise and lower indices.
In the case of antisymmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
``g(-a, b) = delta(-a, b); g(b, -a) = -delta(a, -b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
Examples with metric components data added; this means it is working in a
fixed basis:
>>> Lorentz.data = [1, -1, -1, -1]
>>> Lorentz
TensorIndexType(Lorentz, 0)
>>> Lorentz.data
[[1 0 0 0]
[0 -1 0 0]
[0 0 -1 0]
[0 0 0 -1]]
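The Kronecker delta is available as an attribute, and, since ``Lorentz``
above was constructed without an integer dimension, ``epsilon`` is ``None``
(a small illustrative check):
>>> Lorentz.delta
KD(Lorentz,Lorentz)
>>> Lorentz.epsilon is None
True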
"""
def __new__(cls, name, metric=False, dim=None, eps_dim=None,
dummy_fmt=None):
if isinstance(name, string_types):
name = Symbol(name)
obj = Basic.__new__(cls, name, S.One if metric else S.Zero)
obj._name = str(name)
if not dummy_fmt:
obj._dummy_fmt = '%s_%%d' % obj.name
else:
obj._dummy_fmt = '%s_%%d' % dummy_fmt
if metric is None:
obj.metric_antisym = None
obj.metric = None
else:
if metric in (True, False, 0, 1):
metric_name = 'metric'
obj.metric_antisym = metric
else:
metric_name = metric.name
obj.metric_antisym = metric.antisym
sym2 = TensorSymmetry(get_symmetric_group_sgs(2, obj.metric_antisym))
S2 = TensorType([obj]*2, sym2)
obj.metric = S2(metric_name)
obj.metric._matrix_behavior = True
obj._dim = dim
obj._delta = obj.get_kronecker_delta()
obj._eps_dim = eps_dim if eps_dim else dim
obj._epsilon = obj.get_epsilon()
obj._autogenerated = []
return obj
@property
def auto_right(self):
if not hasattr(self, '_auto_right'):
self._auto_right = TensorIndex("auto_right", self)
return self._auto_right
@property
def auto_left(self):
if not hasattr(self, '_auto_left'):
self._auto_left = TensorIndex("auto_left", self)
return self._auto_left
@property
def auto_index(self):
if not hasattr(self, '_auto_index'):
self._auto_index = TensorIndex("auto_index", self)
return self._auto_index
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# This assignment is a bit controversial: should metric components be assigned
# to the metric only or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
numpy = import_module('numpy')
data = _TensorDataLazyEvaluator.parse_data(data)
if data.ndim > 2:
raise ValueError("data have to be of rank 1 (diagonal metric) or 2.")
if data.ndim == 1:
if self.dim is not None:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError("Dimension mismatch")
dim = data.shape[0]
newndarray = numpy.zeros((dim, dim), dtype=object)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError("Non-square matrix tensor.")
if self.dim is not None:
if self.dim != dim1:
raise ValueError("Dimension mismatch")
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
@property
def name(self):
return self._name
@property
def dim(self):
return self._dim
@property
def delta(self):
return self._delta
@property
def eps_dim(self):
return self._eps_dim
@property
def epsilon(self):
return self._epsilon
@property
def dummy_fmt(self):
return self._dummy_fmt
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
S2 = TensorType([self]*2, sym2)
delta = S2('KD')
delta._matrix_behavior = True
return delta
def get_epsilon(self):
if not isinstance(self._eps_dim, int):
return None
sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1))
Sdim = TensorType([self]*self._eps_dim, sym)
epsilon = Sdim('Eps')
return epsilon
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
This destroys components data associated to the ``TensorIndexType``, if
any, specifically:
* metric tensor data
* Kronecker tensor data
"""
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def delete_tensmul_data(key):
if key in _tensor_data_substitution_dict._substitutions_dict_tensmul:
del _tensor_data_substitution_dict._substitutions_dict_tensmul[key]
# delete metric data:
delete_tensmul_data((self.metric, True, True))
delete_tensmul_data((self.metric, True, False))
delete_tensmul_data((self.metric, False, True))
delete_tensmul_data((self.metric, False, False))
# delete delta tensor data:
delta = self.get_kronecker_delta()
if delta in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[delta]
@doctest_depends_on(modules=('numpy',))
class TensorIndex(Basic):
"""
Represents an abstract tensor index.
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensortype : ``TensorIndexType`` of the index
is_up : flag for contravariant index
Attributes
==========
``name``
``tensortype``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented prepending a ``-`` to the index name.
Dummy indices have a name with head given by ``tensortype._dummy_fmt``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorSymmetry, TensorType, get_symmetric_group_sgs
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i = TensorIndex('i', Lorentz); i
i
>>> sym1 = TensorSymmetry(*get_symmetric_group_sgs(1))
>>> S1 = TensorType([Lorentz], sym1)
>>> A, B = S1('A,B')
>>> A(i)*B(-i)
A(L_0)*B(-L_0)
If you want the index name to be automatically assigned, just put ``True``
in the ``name`` field; it will be generated using the reserved character
``_`` in front of its name, in order to avoid conflicts with possible
existing indices:
>>> i0 = TensorIndex(True, Lorentz)
>>> i0
_i0
>>> i1 = TensorIndex(True, Lorentz)
>>> i1
_i1
>>> A(i0)*B(-i1)
A(_i0)*B(-_i1)
>>> A(i0)*B(-i0)
A(L_0)*B(-L_0)
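The covariant form of an index is obtained with the unary minus,
continuing the example above:
>>> -i
-i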
"""
def __new__(cls, name, tensortype, is_up=True):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{0}".format(len(tensortype._autogenerated))
name_symbol = Symbol(name)
tensortype._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
obj = Basic.__new__(cls, name_symbol, tensortype, S.One if is_up else S.Zero)
obj._name = str(name)
obj._tensortype = tensortype
obj._is_up = is_up
return obj
@property
def name(self):
return self._name
@property
def tensortype(self):
return self._tensortype
@property
def is_up(self):
return self._is_up
def _print(self):
s = self._name
if not self._is_up:
s = '-%s' % s
return s
def __lt__(self, other):
return (self._tensortype, self._name) < (other._tensortype, other._name)
def __neg__(self):
t1 = TensorIndex(self._name, self._tensortype,
(not self._is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types
Parameters
==========
s : string of comma separated names of indices
typ : ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
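If a single index name is given, the index itself is returned instead of a
one-element list:
>>> e0 = tensor_indices('e0', Lorentz)
>>> e0
e0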
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
@doctest_depends_on(modules=('numpy',))
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : base of the BSGS
``generators`` : generators of the BSGS
``rank`` : rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor,
are not covered.
See Also
========
sympy.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorType, get_symmetric_group_sgs
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
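The rank encoded by the BSGS can be read back; a quick check:
>>> sym2.rank
2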
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError("bsgs required, either two separate parameters or one tuple")
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
obj = Basic.__new__(cls, base, generators, **kw_args)
return obj
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.args[1][0].size - 2
def tensorsymmetry(*args):
"""
Return a ``TensorSymmetry`` object.
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
``[[2], [1], [1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
Examples
========
Symmetric tensor using a Young tableau
>>> from sympy.tensor.tensor import TensorIndexType, TensorType, tensorsymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
Symmetric tensor using a ``BSGS`` (base, strong generator set)
>>> from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs
>>> sym2 = tensorsymmetry(*get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
# antisymmetric tensor of rank n
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
# symmetric tensor of rank n
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
@doctest_depends_on(modules=('numpy',))
class TensorType(Basic):
"""
Class of tensor types.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
Examples
========
Define a symmetric tensor
>>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
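The resulting tensor head has one index slot per entry of ``index_types``;
a quick check:
>>> V.rank
2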
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return 'TensorType(%s)' % ([str(x) for x in self.index_types])
def __call__(self, s, comm=0, matrix_behavior=0):
"""
Return a TensorHead object or a list of TensorHead objects.
``s`` name or string of names
``comm``: commutation group number
see ``_TensorManager.set_comm``
Examples
========
Define symmetric tensors ``V``, ``W`` and ``G``, respectively
commuting, anticommuting and with no commutation symmetry
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorsymmetry, TensorType, canon_bp
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
>>> W = S2('W', 1)
>>> G = S2('G', 2)
>>> canon_bp(V(a, b)*V(-b, -a))
V(L_0, L_1)*V(-L_0, -L_1)
>>> canon_bp(W(a, b)*W(-b, -a))
0
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self, comm, matrix_behavior=matrix_behavior)
else:
return [TensorHead(name, self, comm, matrix_behavior=matrix_behavior) for name in names]
def tensorhead(name, typ, sym, comm=0, matrix_behavior=0):
"""
Function generating tensorhead(s).
Parameters
==========
name : name or sequence of names (as in ``symbol``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> A(a, -b)
A(a, -b)
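By default the head belongs to the commuting group ``0``; a quick check:
>>> A.comm
0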
"""
sym = tensorsymmetry(*sym)
S = TensorType(typ, sym)
th = S(name, comm, matrix_behavior=matrix_behavior)
return th
@doctest_depends_on(modules=('numpy',))
class TensorHead(Basic):
r"""
Tensor head of the tensor
Parameters
==========
name : name of the tensor
typ : list of TensorIndexType
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank``
``types`` : equal to ``typ.types``
``symmetry`` : equal to ``typ.symmetry``
``comm`` : commutation group
Notes
=====
A ``TensorHead`` belongs to a commutation group, defined by a
symbol or number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead, TensorType
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> A = tensorhead('A', [Lorentz, Lorentz], [[1],[1]])
Examples with ndarray values, the components data assigned to the
``TensorHead`` object are assumed to be in a fully-contravariant
representation. In case it is necessary to assign components data which
represent the values of a tensor that is not fully contravariant, see the other
examples.
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+2*i for j in range(4)] for i in range(4)]
In order to retrieve data, it is also necessary to specify abstract indices
enclosed by round brackets, then numerical indices inside square brackets.
>>> A(i0, i1)[0, 0]
0
>>> A(i0, i1)[2, 3] == 3+2*2
True
Notice that round brackets create a valued tensor expression instance:
>>> A(i0, i1)
A(i0, i1)
To view the data, just type:
>>> A.data
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
Turning to a tensor expression, covariant indices get the corresponding
components data corrected by the metric:
>>> A(i0, -i1).data
[[0 -1 -2 -3]
[2 -3 -4 -5]
[4 -5 -6 -7]
[6 -7 -8 -9]]
>>> A(-i0, -i1).data
[[0 -1 -2 -3]
[-2 3 4 5]
[-4 5 6 7]
[-6 7 8 9]]
while if all indices are contravariant, the ``ndarray`` remains the same
>>> A(i0, i1).data
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
When all indices are contracted and components data are added to the tensor,
accessing the data will return a scalar, not a numpy object. In fact, numpy
ndarrays are dropped to scalars if they contain only one element.
>>> A(i0, -i0)
A(L_0, -L_0)
>>> A(i0, -i0).data
-18
It is also possible to assign components data to an indexed tensor, i.e. a
tensor with specified covariant and contravariant components. In this
example, the covariant components data of the Electromagnetic tensor are
injected into `A`:
>>> from sympy import symbols
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
Let's define `F`, an antisymmetric tensor; we have to assign an
antisymmetric matrix to it, because `[[2]]` stands for the Young tableau
representation of an antisymmetric set of two elements:
>>> F = tensorhead('A', [Lorentz, Lorentz], [[2]])
>>> F(-i0, -i1).data = [
... [0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> F(i0, i1).data
[[0 -E_x/c -E_y/c -E_z/c]
[E_x/c 0 -B_z B_y]
[E_y/c B_z 0 -B_x]
[E_z/c -B_y B_x 0]]
and the mixed contravariant-covariant form:
>>> F(i0, -i1).data
[[0 E_x/c E_y/c E_z/c]
[E_x/c 0 B_z -B_y]
[E_y/c -B_z 0 B_x]
[E_z/c B_y -B_x 0]]
To convert numpy's ndarray to a sympy matrix, just cast:
>>> from sympy import Matrix
>>> Matrix(F.data)
Matrix([
[ 0, -E_x/c, -E_y/c, -E_z/c],
[E_x/c, 0, -B_z, B_y],
[E_y/c, B_z, 0, -B_x],
[E_z/c, -B_y, B_x, 0]])
Still notice, in this last example, that accessing components data from a
tensor without specifying the indices is equivalent to assuming that all
indices are contravariant.
It is also possible to store symbolic components data inside a tensor, for
example, define a four-momentum-like tensor:
>>> from sympy import symbols
>>> P = tensorhead('P', [Lorentz], [[1]])
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> P.data = [E, px, py, pz]
The contravariant and covariant components are, respectively:
>>> P(i0).data
[E p_x p_y p_z]
>>> P(-i0).data
[E -p_x -p_y -p_z]
The contraction of a 1-index tensor with itself is usually indicated as a
power of two:
>>> P(i0)**2
E**2 - p_x**2 - p_y**2 - p_z**2
As the power of two is clearly identical to `P_\mu P^\mu`, it is possible to
simply contract the ``TensorHead`` object, without specifying the indices
>>> P**2
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, typ, comm=0, matrix_behavior=0, **kw_args):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError("invalid name")
comm2i = TensorManager.comm_symbols2i(comm)
obj = Basic.__new__(cls, name_symbol, typ, **kw_args)
obj._matrix_behavior = matrix_behavior
obj._name = obj.args[0].name
obj._rank = len(obj.index_types)
obj._types = typ.types
obj._symmetry = typ.symmetry
obj._comm = comm2i
return obj
@property
def name(self):
return self._name
@property
def rank(self):
return self._rank
@property
def types(self):
return self._types[:]
@property
def symmetry(self):
return self._symmetry
@property
def typ(self):
return self.args[1]
@property
def comm(self):
return self._comm
@property
def index_types(self):
return self.args[1].index_types[:]
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
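Examples
========
A small illustration; ``G`` below is placed in the anticommuting group ``1``:
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> G = tensorhead('G', [Lorentz], [[1]], 1)
>>> A.commutes_with(G)
0
>>> G.commutes_with(G)
1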
"""
r = TensorManager.get_comm(self._comm, other._comm)
return r
def _print(self):
return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
def _check_auto_matrix_indices_in_call(self, *indices):
matrix_behavior_kinds = dict()
if len(indices) != len(self.index_types):
if not self._matrix_behavior:
raise ValueError('wrong number of indices')
# Take the last one or two missing
# indices as auto-matrix indices:
ldiff = len(self.index_types) - len(indices)
if ldiff > 2:
raise ValueError('wrong number of indices')
if ldiff == 2:
mat_ind = [len(indices), len(indices) + 1]
elif ldiff == 1:
mat_ind = [len(indices)]
not_equal = True
else:
not_equal = False
mat_ind = [i for i, e in enumerate(indices) if e is True]
if mat_ind:
not_equal = True
indices = tuple([_ for _ in indices if _ is not True])
for i, el in enumerate(indices):
if not isinstance(el, TensorIndex):
not_equal = True
break
if el._tensortype != self.index_types[i]:
not_equal = True
break
if not_equal:
for el in mat_ind:
eltyp = self.index_types[el]
if eltyp in matrix_behavior_kinds:
elind = -self.index_types[el].auto_right
matrix_behavior_kinds[eltyp].append(elind)
else:
elind = self.index_types[el].auto_left
matrix_behavior_kinds[eltyp] = [elind]
indices = indices[:el] + (elind,) + indices[el:]
return indices, matrix_behavior_kinds
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
There is a special behavior in case of indices denoted by ``True``,
they are considered auto-matrix indices, their slots are automatically
filled, and confer to the tensor the behavior of a matrix or vector
upon multiplication with another tensor containing auto-matrix indices
of the same ``TensorIndexType``. This means indices get summed over the
same way as in matrix multiplication. For matrix behavior, define two
auto-matrix indices, for vector behavior define just one.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> t = A(a, -b)
>>> t
A(a, -b)
To use the auto-matrix index behavior, just put a ``True`` on the
desired index position.
>>> r = A(True, True)
>>> r
A(auto_left, -auto_right)
Here ``auto_left`` and ``auto_right`` are automatically generated
tensor indices, they are only two for every ``TensorIndexType`` and
can be assigned to just one or two indices of a given type.
Auto-matrix indices can be assigned many times in a tensor, if indices
are of different ``TensorIndexType``
>>> Spinor = TensorIndexType('Spinor', dummy_fmt='S')
>>> B = tensorhead('B', [Lorentz, Lorentz, Spinor, Spinor], [[1]*4])
>>> s = B(True, True, True, True)
>>> s
B(auto_left, -auto_right, auto_left, -auto_right)
Here, ``auto_left`` and ``auto_right`` are repeated twice, but they are
not the same indices, as they refer to different ``TensorIndexType``s.
Auto-matrix indices are automatically contracted upon multiplication,
>>> r*s
A(auto_left, L_0)*B(-L_0, -auto_right, auto_left, -auto_right)
The multiplication algorithm has found an ``auto_right`` index in ``A``
and an ``auto_left`` index in ``B`` referring to the same
``TensorIndexType`` (``Lorentz``), so they have been contracted.
Auto-matrix indices can be accessed from the ``TensorIndexType``:
>>> Lorentz.auto_right
auto_right
>>> Lorentz.auto_left
auto_left
There is a special case, in which the ``True`` parameter is not needed
to declare an auto-matrix index, i.e. when the matrix behavior has been
declared upon ``TensorHead`` construction, in that case the last one or
two tensor indices may be omitted, so that they automatically become
auto-matrix indices:
>>> C = tensorhead('C', [Lorentz, Lorentz], [[1]*2], matrix_behavior=True)
>>> C()
C(auto_left, -auto_right)
"""
indices, matrix_behavior_kinds = self._check_auto_matrix_indices_in_call(*indices)
tensor = Tensor._new_with_dummy_replacement(self, indices, **kw_args)
return tensor
def __pow__(self, other):
if self.data is None:
raise ValueError("No power on abstract tensors.")
numpy = import_module('numpy')
metrics = [_.data for _ in self.args[1].args[0]]
marray = self.data
for metric in metrics:
marray = numpy.tensordot(marray, numpy.tensordot(metric, marray, (1, 0)), (0, 0))
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
return self.data.flatten().__iter__()
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
Destroy components data associated to the ``TensorHead`` object, this
checks for attached components data, and destroys components data too.
"""
# do not garbage collect Kronecker tensor (it should be done by
# ``TensorIndexType`` garbage collection)
if self.name == "KD":
return
# the data attached to a tensor must be deleted only by the TensorHead
# destructor. If the TensorHead is deleted, it means that there are no
# more instances of that tensor anywhere.
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
@doctest_depends_on(modules=('numpy',))
class TensExpr(Basic):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensAdd`` objects are put in canonical form using the Butler-Portugal
algorithm for canonicalization under monoterm symmetries.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
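Examples
========
For instance, a contracted product records its single dummy index in this
positional form (a small illustration):
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0 = tensor_indices('m0', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> (p(m0)*q(-m0)).dum
[(0, 0, 0, 1)]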
"""
_op_priority = 11.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __sub__(self, other):
raise NotImplementedError
def __rsub__(self, other):
raise NotImplementedError
def __mul__(self, other):
raise NotImplementedError
def __rmul__(self, other):
raise NotImplementedError
def __pow__(self, other):
if self.data is None:
raise ValueError("No power without ndarray data.")
numpy = import_module('numpy')
free = self.free
marray = self.data
for metric in free:
marray = numpy.tensordot(
marray,
numpy.tensordot(
metric[0]._tensortype.data,
marray,
(1, 0)
),
(0, 0)
)
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
def __rpow__(self, other):
raise NotImplementedError
def __div__(self, other):
raise NotImplementedError
def __rdiv__(self, other):
raise NotImplementedError()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@doctest_depends_on(modules=('numpy',))
def get_matrix(self):
"""
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
>>> from sympy import ones
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> A = S2('A')
The tensor ``A`` is symmetric in its indices, as can be deduced by the
``[1, 1]`` Young tableau when constructing `sym2`. One has to be
careful to assign symmetric component data to ``A``, as the symmetry
properties of data are currently not checked to be compatible with the
defined tensor symmetry.
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+i for j in range(4)] for i in range(4)]
>>> A(i0, i1).get_matrix()
Matrix([
[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 6]])
It is possible to perform the usual operations on matrices, such as
matrix multiplication:
>>> A(i0, i1).get_matrix()*ones(4, 1)
Matrix([
[ 6],
[10],
[14],
[18]])
"""
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
def _eval_simplify(self, ratio, measure):
# this is a way to simplify a tensor expression.
# This part walks for all `TensorHead`s appearing in the tensor expr
# and looks for `simplify_this_type`, to specifically act on a subexpr
# containing one type of `TensorHead` instance only:
expr = self
for i in list(set(self.components)):
if hasattr(i, 'simplify_this_type'):
expr = i.simplify_this_type(expr)
# TODO: missing feature, perform metric contraction.
return expr
@doctest_depends_on(modules=('numpy',))
class TensAdd(TensExpr):
"""
Sum of tensors
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple of addends
``rank`` : rank of the tensor
``free_args`` : list of the free indices in sorted order
Notes
=====
Sums of more than one tensor are automatically put in canonical form.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensorhead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(a) + q(a); t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
Examples with components data added to the tensor expression:
>>> from sympy import eye
>>> Lorentz.data = [1, -1, -1, -1]
>>> a, b = tensor_indices('a, b', Lorentz)
>>> p.data = [2, 3, -2, 7]
>>> q.data = [2, 3, -2, 7]
>>> t = p(a) + q(a); t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
The following are: 2**2 - 3**2 - 2**2 - 7**2 ==> -58
>>> (p(a)*p(-a)).data
-58
>>> p(a)**2
-58
"""
def __new__(cls, *args, **kw_args):
args = [sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
if not args:
return S.Zero
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# replace auto-matrix indices so that they are the same in all addends
args = TensAdd._tensAdd_check_automatrix(args)
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 TensMul element in its `args`:
if len(args) == 1 and isinstance(args[0], TensMul):
obj = Basic.__new__(cls, *args, **kw_args)
return obj
# TODO: do not or do canonicalize by default?
# Technically, one may wish to have additions of non-canonicalized
# tensors. This feature should be removed in the future.
# Unfortunately this would require to rewrite a lot of tests.
# canonicalize all TensMul
args = [canon_bp(x) for x in args if x]
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return S.Zero
if len(args) == 1:
return args[0]
# collect canonicalized terms
def sort_key(t):
x = get_tids(t)
return (x.components, x.free, x.dum)
args.sort(key=sort_key)
args = TensAdd._tensAdd_collect_terms(args)
if not args:
return S.Zero
# if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args, **kw_args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
if not all(isinstance(x, TensExpr) for x in args):
args1 = []
for x in args:
if isinstance(x, TensExpr):
if isinstance(x, TensAdd):
args1.extend(list(x.args))
else:
args1.append(x)
args1 = [x for x in args1 if isinstance(x, TensExpr) and x.coeff]
args2 = [x for x in args if not isinstance(x, TensExpr)]
t1 = TensMul.from_data(Add(*args2), [], [], [])
args = [t1] + args1
a = []
for x in args:
if isinstance(x, TensAdd):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check_automatrix(args):
# check that all automatrix indices are the same.
# if there are no addends, just return.
if not args:
return args
# @type auto_left_types: set
auto_left_types = set([])
auto_right_types = set([])
args_auto_left_types = []
args_auto_right_types = []
for i, arg in enumerate(args):
arg_auto_left_types = set([])
arg_auto_right_types = set([])
for index in get_indices(arg):
# @type index: TensorIndex
if index in (index._tensortype.auto_left, -index._tensortype.auto_left):
auto_left_types.add(index._tensortype)
arg_auto_left_types.add(index._tensortype)
if index in (index._tensortype.auto_right, -index._tensortype.auto_right):
auto_right_types.add(index._tensortype)
arg_auto_right_types.add(index._tensortype)
args_auto_left_types.append(arg_auto_left_types)
args_auto_right_types.append(arg_auto_right_types)
        for i, (aas_left, aas_right) in enumerate(
                zip(args_auto_left_types, args_auto_right_types)):
missing_left = auto_left_types - aas_left
missing_right = auto_right_types - aas_right
missing_intersection = missing_left & missing_right
for j in missing_intersection:
args[i] *= j.delta(j.auto_left, -j.auto_right)
if missing_left != missing_right:
raise ValueError("cannot determine how to add auto-matrix indices on some args")
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
indices0 = set([x[0] for x in get_tids(args[0]).free])
list_indices = [set([y[0] for y in get_tids(x).free]) for x in args[1:]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
a = []
prev = args[0]
prev_coeff = get_coeff(prev)
changed = False
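        # Illustrative walk-through (hypothetical values): with ``args``
        # sorted as [3*p(a), 2*p(a), q(a)], the two p(a) addends share the
        # same (components, free, dum) triple, so their coefficients are
        # folded into a single 5*p(a); q(a) then starts a new group.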
for x in args[1:]:
# if x and prev have the same tensor, update the coeff of prev
x_tids = get_tids(x)
prev_tids = get_tids(prev)
if x_tids.components == prev_tids.components \
and x_tids.free == prev_tids.free and x_tids.dum == prev_tids.dum:
prev_coeff = prev_coeff + get_coeff(x)
changed = True
op = 0
else:
# x and prev are different; if not changed, prev has not
# been updated; store it
if not changed:
a.append(prev)
else:
# get a tensor from prev with coeff=prev_coeff and store it
if prev_coeff:
t = TensMul.from_data(prev_coeff, prev_tids.components,
prev_tids.free, prev_tids.dum)
a.append(t)
# move x to prev
op = 1
pprev, prev = prev, x
pprev_coeff, prev_coeff = prev_coeff, get_coeff(x)
changed = False
# if the case op=0 prev was not stored; store it now
# in the case op=1 x was not stored; store it now (as prev)
if op == 0 and prev_coeff:
prev = TensMul.from_data(prev_coeff, prev_tids.components, prev_tids.free, prev_tids.dum)
a.append(prev)
elif op == 1:
a.append(prev)
return a
@property
def rank(self):
return self.args[0].rank
@property
def free_args(self):
return self.args[0].free_args
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Parameters
==========
indices
Examples
========
>>> from sympy import Symbol
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> g = Lorentz.metric
>>> t = p(i0)*p(i1) + g(i0,i1)*q(i2)*q(-i2)
>>> t(i0,i2)
metric(i0, i2)*q(L_0)*q(-L_0) + p(i0)*p(i2)
>>> t(i0,i1) - t(i1,i0)
0
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.fun_eval(*index_tuples).args) for x in self.args]
res = TensAdd(*a)
return res
def canon_bp(self):
"""
        Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
"""
args = [x.canon_bp() for x in self.args]
res = TensAdd(*args)
return res
def equals(self, other):
other = sympify(other)
if isinstance(other, TensMul) and other._coeff == 0:
return all(x._coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t._coeff == 0
else:
return all(x._coeff == 0 for x in t.args)
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
return TensAdd(*(x*other for x in self.args))
def __rmul__(self, other):
return self*other
def __div__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensAdd(*(x/other for x in self.args))
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
return self.data[item]
__truediv__ = __div__
    __rtruediv__ = __rdiv__
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
        index_tuples : list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j) + A(i, -j)
>>> t.fun_eval((i, k),(-j, l))
A(k, L_0)*B(l, -L_0) + A(k, l)
"""
args = self.args
args1 = []
for x in args:
y = x.fun_eval(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
        index_tuples : list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i,j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
args = self.args
args1 = []
for x in args:
y = x.substitute_indices(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
@staticmethod
def from_TIDS_list(coeff, tids_list):
"""
Given a list of coefficients and a list of ``TIDS`` objects, construct
a ``TensAdd`` instance, equivalent to the one that would result from
creating single instances of ``TensMul`` and then adding them.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, TensAdd
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j = tensor_indices('i,j', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> eA = 3*A(i, j)
>>> eB = 2*B(j, i)
>>> t1 = eA._tids
>>> t2 = eB._tids
>>> c1 = eA.coeff
>>> c2 = eB.coeff
>>> TensAdd.from_TIDS_list([c1, c2], [t1, t2])
2*B(i, j) + 3*A(i, j)
If the coefficient parameter is a scalar, then it will be applied
as a coefficient on all ``TIDS`` objects.
>>> TensAdd.from_TIDS_list(4, [t1, t2])
4*A(i, j) + 4*B(i, j)
"""
if not isinstance(coeff, (list, tuple, Tuple)):
coeff = [coeff] * len(tids_list)
tensmul_list = [TensMul.from_TIDS(c, t) for c, t in zip(coeff, tids_list)]
return TensAdd(*tensmul_list)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
        if self.data is None:
raise ValueError("No iteration on abstract tensors")
return self.data.flatten().__iter__()
@doctest_depends_on(modules=('numpy',))
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
This object is usually created from a ``TensorHead``, by attaching indices
    to it. Indices preceded by a minus sign are considered covariant,
    otherwise contravariant.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType("Lorentz", dummy_fmt="L")
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = tensorhead("A", [Lorentz, Lorentz], [[1], [1]])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
"""
is_commutative = False
def __new__(cls, tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._tids = tids
obj._indices = indices
obj._is_canon_bp = kw_args.get('is_canon_bp', False)
return obj
@staticmethod
def _new_with_dummy_replacement(tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
indices = tids.get_indices()
return Tensor(tensor_head, indices, **kw_args)
@property
def is_canon_bp(self):
return self._is_canon_bp
@property
def indices(self):
return self._indices
@property
def free(self):
return self._tids.free
@property
def dum(self):
return self._tids.dum
@property
def rank(self):
return len(self.free)
@property
def free_args(self):
return sorted([x[0] for x in self.free])
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
if self._is_canon_bp:
return self
g, dummies, msym, v = self._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tensor = self.perm2tensor(can, True)
return tensor
@property
def types(self):
return get_tids(self).components[0]._types
@property
def coeff(self):
return S.One
@property
def component(self):
return self.args[0]
@property
def components(self):
return [self.args[0]]
def split(self):
return [self]
def expand(self):
return self
def sorted_components(self):
return self
def get_indices(self):
"""
Get a list of indices, corresponding to those of the tensor.
"""
return self._tids.get_indices()
def as_base_exp(self):
return self, S.One
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz]*5, [[1]*5])
>>> t = A(i2, i1, -i2, -i3, i4)
>>> t
A(L_0, i1, -L_0, -i3, i4)
>>> t(i1, i2, i3)
A(L_0, i1, -L_0, i2, i3)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def fun_eval(self, *index_tuples):
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
# TODO: put this into TensExpr?
def __iter__(self):
return self.data.flatten().__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
return self.data[item]
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
def __mul__(self, other):
if isinstance(other, TensAdd):
return TensAdd(*[self*arg for arg in other.args])
tmul = TensMul(self, other)
return tmul
def __rmul__(self, other):
return TensMul(other, self)
def __div__(self, other):
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other, is_canon_bp=self.is_canon_bp)
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
        return TensAdd(other, -self)
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __neg__(self):
return TensMul(S.NegativeOne, self)
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return ('%s(%s)' % (component.name, ', '.join(indices)))
else:
return ('%s' % component.name)
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return S.One == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, metric):
tids, sign = get_tids(self).contract_metric(metric)
return TensMul.from_TIDS(sign, tids)
def contract_delta(self, metric):
return self.contract_metric(metric)
@doctest_depends_on(modules=('numpy',))
class TensMul(TensExpr):
"""
Product of tensors
Parameters
==========
coeff : SymPy coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
``ext_rank`` : rank of the tensor counting the dummy indices
``rank`` : rank of the tensor
``coeff`` : SymPy coefficient of the tensor
``free_args`` : list of the free indices in sorted order
    ``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
"""
def __new__(cls, *args, **kw_args):
# make sure everything is sympified:
args = [sympify(arg) for arg in args]
# flatten:
args = TensMul._flatten(args)
is_canon_bp = kw_args.get('is_canon_bp', False)
if not any([isinstance(arg, TensExpr) for arg in args]):
tids = TIDS([], [], [])
else:
tids_list = [arg._tids for arg in args if isinstance(arg, (Tensor, TensMul))]
if len(tids_list) == 1:
for arg in args:
if not isinstance(arg, Tensor):
continue
is_canon_bp = kw_args.get('is_canon_bp', arg._is_canon_bp)
tids = reduce(lambda a, b: a*b, tids_list)
if any([isinstance(arg, TensAdd) for arg in args]):
add_args = TensAdd._tensAdd_flatten(args)
return TensAdd(*add_args)
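        # the scalar coefficient is the product of every non-tensor factor;
        # seeding the reduction with S.One keeps ``coeff == 1`` when there
        # are no scalar factors at all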
coeff = reduce(lambda a, b: a*b, [S.One] + [arg for arg in args if not isinstance(arg, TensExpr)])
args = tids.get_tensors()
if coeff != 1:
args = [coeff] + args
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args)
obj._types = []
for t in tids.components:
obj._types.extend(t._types)
obj._tids = tids
obj._ext_rank = len(obj._tids.free) + 2*len(obj._tids.dum)
obj._coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
@staticmethod
def _flatten(args):
a = []
for arg in args:
if isinstance(arg, TensMul):
a.extend(arg.args)
else:
a.append(arg)
return a
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
tids = TIDS(components, free, dum)
return TensMul.from_TIDS(coeff, tids, **kw_args)
@staticmethod
def from_TIDS(coeff, tids, **kw_args):
return TensMul(coeff, *tids.get_tensors(), **kw_args)
@property
def free_args(self):
return sorted([x[0] for x in self.free])
@property
def components(self):
return self._tids.components[:]
@property
def free(self):
return self._tids.free[:]
@property
def coeff(self):
return self._coeff
@property
def dum(self):
return self._tids.dum[:]
@property
def rank(self):
return len(self.free)
@property
def types(self):
return self._types[:]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self._coeff == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (get_coeff(t), tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def get_indices(self):
"""
Returns the list of indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(m1)*g(m0,m2)
>>> t.get_indices()
[m1, m0, m2]
"""
return self._tids.get_indices()
def split(self):
"""
Returns a list of tensors, whose product is ``self``
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(a,b)*B(-b,c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if self.args == ():
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
If the two tensors have an index in common, one contravariant
        and the other covariant, that index is summed over in their product.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
other = sympify(other)
if not isinstance(other, TensExpr):
coeff = self.coeff*other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
if isinstance(other, TensAdd):
return TensAdd(*[self*x for x in other.args])
new_tids = self._tids*other._tids
coeff = self.coeff*other.coeff
tmul = TensMul.from_TIDS(coeff, new_tids)
return tmul
def __rmul__(self, other):
other = sympify(other)
coeff = other*self._coeff
tmul = TensMul.from_TIDS(coeff, self._tids)
return tmul
def __div__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
coeff = self._coeff/other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
return self.data[item]
__truediv__ = __div__
    __rtruediv__ = __rdiv__
def sorted_components(self):
"""
Returns a tensor with sorted components
calling the corresponding method in a ``TIDS`` object.
"""
new_tids, sign = self._tids.sorted_components()
coeff = -self.coeff if sign == -1 else self.coeff
t = TensMul.from_TIDS(coeff, new_tids)
return t
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[2]])
>>> t = A(m0,-m1)*A(m1,-m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
if not self.components:
return self
t = self.sorted_components()
g, dummies, msym, v = t._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
tids, sign = get_tids(self).contract_metric(g)
res = TensMul.from_TIDS(sign*self.coeff, tids)
return res
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
        ``index_tuples`` list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.fun_eval((i, k),(-j, l))
A(k, L_0)*B(-L_0, l)
"""
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
def __call__(self, *indices):
"""Returns tensor product with ordered free indices replaced by ``indices``
Examples
========
>>> from sympy import Symbol
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p,q', [Lorentz], [[1]])
>>> t = p(i0)*q(i1)*q(-i1)
>>> t(i1)
p(i1)*q(L_0)*q(-L_0)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def _print(self):
args = self.args
get_str = lambda arg: str(arg) if arg.is_Atom or isinstance(arg, TensExpr) else ("(%s)" % str(arg))
if not args:
# no arguments is equivalent to "1", i.e. TensMul().
# If tensors are constructed correctly, this should never occur.
return "1"
if self.coeff == S.NegativeOne:
# expressions like "-A(a)"
return "-"+"*".join([get_str(arg) for arg in args[1:]])
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
return "*".join([get_str(arg) for arg in self.args])
@property
def data(self):
dat = _tensor_data_substitution_dict[self]
if dat is None:
return None
return self.coeff * dat
@data.setter
def data(self, data):
raise ValueError("Not possible to set component data to a tensor expression")
@data.deleter
def data(self):
raise ValueError("Not possible to delete component data to a tensor expression")
def __iter__(self):
if self.data is None:
raise ValueError("No iteration on abstract tensors")
return (self.data.flatten()).__iter__()
def canon_bp(p):
"""
Butler-Portugal canonicalization
"""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
def tensor_mul(*a):
"""
product of tensors
"""
if not a:
return TensMul.from_data(S.One, [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
def riemann_cyclic_replace(t_r):
"""
replace Riemann tensor with an equivalent expression
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = [x[0] for x in free]
t0 = S(2)/3*t_r
t1 = - S(1)/3*t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))
t2 = S(1)/3*t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))
t3 = t0 + t1 + t2
return t3
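# A minimal sketch (not part of the original API) of how
# ``riemann_cyclic_replace`` can be exercised.  ``TensorIndexType``,
# ``tensor_indices`` and ``tensorhead`` are the helpers defined elsewhere in
# this module; the tensor name ``R`` and the index names are assumptions.
def _demo_riemann_cyclic_replace():
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
    R = tensorhead('R', [Lorentz]*4, [[2, 2]])
    # the result is the weighted sum of index permutations given in the
    # docstring above: 2/3*R(i,j,k,l) - 1/3*R(i,l,j,k) + 1/3*R(i,k,j,l)
    return riemann_cyclic_replace(R(i, j, k, l))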
def riemann_cyclic(t2):
"""
replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead, riemann_cyclic
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> R = tensorhead('R', [Lorentz]*4, [[2, 2]])
>>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
>>> riemann_cyclic(t)
0
"""
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3)
if not t3:
return t3
else:
return canon_bp(t3)
def get_lines(ex, index_type):
"""
returns ``(lines, traces, rest)`` for an index type,
where ``lines`` is the list of list of positions of a matrix line,
``traces`` is the list of list of traced matrix lines,
    ``rest`` is the rest of the elements of the tensor.
"""
def _join_lines(a):
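        # merge segments that share endpoints into maximal lines; e.g. with
        # the hypothetical input [[0, 1], [1, 2], [3, 2]] the segments are
        # glued together (reversing where needed) into the single line
        # [0, 1, 2, 3]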
i = 0
while i < len(a):
x = a[i]
xend = x[-1]
xstart = x[0]
hit = True
while hit:
hit = False
for j in range(i + 1, len(a)):
if j >= len(a):
break
if a[j][0] == xend:
hit = True
x.extend(a[j][1:])
xend = x[-1]
a.pop(j)
continue
if a[j][0] == xstart:
hit = True
                        a[i] = list(reversed(a[j][1:])) + x
x = a[i]
xstart = a[i][0]
a.pop(j)
continue
if a[j][-1] == xend:
hit = True
x.extend(reversed(a[j][:-1]))
xend = x[-1]
a.pop(j)
continue
if a[j][-1] == xstart:
hit = True
a[i] = a[j][:-1] + x
x = a[i]
xstart = x[0]
a.pop(j)
continue
i += 1
return a
tids = ex._tids
components = tids.components
dt = {}
for c in components:
if c in dt:
continue
index_types = c.index_types
a = []
for i in range(len(index_types)):
if index_types[i] is index_type:
a.append(i)
if len(a) > 2:
raise ValueError('at most two indices of type %s allowed' % index_type)
if len(a) == 2:
dt[c] = a
dum = tids.dum
lines = []
traces = []
traces1 = []
for p0, p1, c0, c1 in dum:
if components[c0] not in dt:
continue
if c0 == c1:
traces.append([c0])
continue
ta0 = dt[components[c0]]
ta1 = dt[components[c1]]
if p0 not in ta0:
continue
if ta0.index(p0) == ta1.index(p1):
            # case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1;
# to deal with this case one could add to the position
# a flag for transposition;
# one could write [(c0, False), (c1, True)]
raise NotImplementedError
# if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1
# if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0
ta0 = dt[components[c0]]
b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0)
lines1 = lines[:]
for line in lines:
if line[-1] == b0:
if line[0] == b1:
n = line.index(min(line))
traces1.append(line)
traces.append(line[n:] + line[:n])
else:
line.append(b1)
break
elif line[0] == b1:
line.insert(0, b0)
break
else:
lines1.append([b0, b1])
lines = [x for x in lines1 if x not in traces1]
lines = _join_lines(lines)
rest = []
for line in lines:
for y in line:
rest.append(y)
for line in traces:
for y in line:
rest.append(y)
rest = [x for x in range(len(components)) if x not in rest]
return lines, traces, rest
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_tids(t):
if isinstance(t, TensExpr):
return t._tids
return TIDS([], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return S.One
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError("no coefficient associated to this tensor expression")
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
new_tids = get_tids(t).perm2tensor(g, canon_bp)
coeff = get_coeff(t)
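    # the final slot of the permutation tracks the overall sign: if the
    # canonicalizer moved it from its original position, the canonical
    # tensor picks up a factor of -1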
if g[-1] != len(g) - 1:
coeff = -coeff
res = TensMul.from_TIDS(coeff, new_tids, is_canon_bp=canon_bp)
return res
def substitute_indices(t, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
    ``index_tuples`` list of tuples ``(old_index, new_index)``
    Note: this method will neither raise nor lower the indices; it just
    replaces their symbols.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensorhead('A,B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i,j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
if not isinstance(t, TensExpr):
return t
free = t.free
free1 = []
for j, ipos, cpos in free:
for i, v in index_tuples:
if i._name == j._name and i._tensortype == j._tensortype:
if i._is_up == j._is_up:
free1.append((v, ipos, cpos))
else:
free1.append((-v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
t = TensMul.from_data(t.coeff, t.components, free1, t.dum)
return t
| vipulroxx/sympy | sympy/tensor/tensor.py | Python | bsd-3-clause | 133,023 |
import six
from sqlalchemy_utils.utils import str_coercible
from .weekday import WeekDay
@str_coercible
class WeekDays(object):
def __init__(self, bit_string_or_week_days):
if isinstance(bit_string_or_week_days, six.string_types):
self._days = set()
if len(bit_string_or_week_days) != WeekDay.NUM_WEEK_DAYS:
raise ValueError(
'Bit string must be {0} characters long.'.format(
WeekDay.NUM_WEEK_DAYS
)
)
for index, bit in enumerate(bit_string_or_week_days):
if bit not in '01':
raise ValueError(
'Bit string may only contain zeroes and ones.'
)
if bit == '1':
self._days.add(WeekDay(index))
elif isinstance(bit_string_or_week_days, WeekDays):
self._days = bit_string_or_week_days._days
else:
self._days = set(bit_string_or_week_days)
def __eq__(self, other):
if isinstance(other, WeekDays):
return self._days == other._days
elif isinstance(other, six.string_types):
return self.as_bit_string() == other
else:
return NotImplemented
def __iter__(self):
for day in sorted(self._days):
yield day
def __contains__(self, value):
return value in self._days
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self.as_bit_string()
)
def __unicode__(self):
return u', '.join(six.text_type(day) for day in self)
def as_bit_string(self):
return ''.join(
'1' if WeekDay(index) in self._days else '0'
for index in six.moves.xrange(WeekDay.NUM_WEEK_DAYS)
)
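# A minimal usage sketch (not part of the original module): ``WeekDays``
# round-trips between 7-character bit strings and ``WeekDay`` members.  The
# convention that index 0 denotes the first day of the week is assumed, as
# is the hashability/ordering of ``WeekDay`` instances.
def _demo_weekdays():
    days = WeekDays('1000100')                # days 0 and 4 are set
    assert WeekDay(0) in days
    assert days.as_bit_string() == '1000100'
    return list(days)                         # sorted WeekDay instances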
| cheungpat/sqlalchemy-utils | sqlalchemy_utils/primitives/weekdays.py | Python | bsd-3-clause | 1,866 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SavedSearch.owner'
db.add_column(
'sentry_savedsearch',
'owner',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'], null=True
),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'SavedSearch.owner'
db.delete_column('sentry_savedsearch', 'owner_id')
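    # Hypothetical invocation sketch: under South these methods run when the
    # app is migrated, e.g. ``python manage.py migrate sentry``.  ``forwards``
    # adds the nullable ``owner`` foreign key, while ``backwards`` drops the
    # generated ``owner_id`` database column.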
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apiapplication': {
'Meta': {
'object_name': 'ApiApplication'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'client_id': (
'django.db.models.fields.CharField', [], {
'default': "'fe2733e3954542ac8f6cf90ffcbd3b79e389172515a24faeb7ed124ffd78c9e8'",
'unique': 'True',
'max_length': '64'
}
),
'client_secret': (
'sentry.db.models.fields.encrypted.EncryptedTextField', [], {
'default': "'e215d3d1ec6e44ef8cf5fd79620982ffc5dd33d7225643149e601a15046d8109'"
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'homepage_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'default': "'Nongenealogic Salvatore'",
'max_length': '64',
'blank': 'True'
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'privacy_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'terms_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.apiauthorization': {
'Meta': {
'unique_together': "(('user', 'application'),)",
'object_name': 'ApiAuthorization'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.apigrant': {
'Meta': {
'object_name': 'ApiGrant'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']"
}
),
'code': (
'django.db.models.fields.CharField', [], {
'default': "'aad6032d25614f529931584f529a0eaf'",
'max_length': '64',
'db_index': 'True'
}
),
'expires_at': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 3, 23, 0, 0)',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'redirect_uri': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'expires_at': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 4, 22, 0, 0)',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'refresh_token': (
'django.db.models.fields.CharField', [], {
'default': "'76d64e267e4549119955c610411aa1b739da24b5406047fe9e44c04e512f5d42'",
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token': (
'django.db.models.fields.CharField', [], {
'default': "'74be333b72c44fd3b22922d41794a59f8084beb8f7194099a9f09690a5ef545b'",
'unique': 'True',
'max_length': '64'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config':
('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 3, 30, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {
'object_name': 'Deploy'
},
'date_finished':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.dsymapp': {
'Meta': {
'unique_together': "(('project', 'platform', 'app_id'),)",
'object_name': 'DSymApp'
},
'app_id': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'sync_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'through': "orm['sentry.EnvironmentProject']",
'symmetrical': 'False'
}
)
},
'sentry.environmentproject': {
'Meta': {
'unique_together': "(('project', 'environment'),)",
'object_name': 'EnvironmentProject'
},
'environment': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Environment']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {
'unique_together': "(('raw_event', 'processing_issue'),)",
'object_name': 'EventProcessingIssue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'processing_issue': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProcessingIssue']"
}
),
'raw_event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.RawEvent']"
}
)
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupcommitresolution': {
'Meta': {
'unique_together': "(('group_id', 'commit_id'),)",
'object_name': 'GroupCommitResolution'
},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {
'unique_together': "(('project', 'checksum', 'type'),)",
'object_name': 'ProcessingIssue'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'db_index': 'True'
}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '30'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'flags':
('django.db.models.fields.BigIntegerField', [], {
'default': '0',
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'RawEvent'
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.release': {
'Meta': {
'unique_together': "(('organization', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.reprocessingreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'ReprocessingReport'
},
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'ESp86sVflgizIaMZyG2qmXPESoTsJYEM'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.versiondsymfile': {
'Meta': {
'unique_together': "(('dsym_file', 'version', 'build'),)",
'object_name': 'VersionDSymFile'
},
'build':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'dsym_app': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymApp']"
}
),
'dsym_file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProjectDSymFile']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '32'
})
}
}
complete_apps = ['sentry']
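# Aside (illustrative, not part of the generated migration): South reads the
# frozen ``models`` dict above to reconstruct a historical ORM, so migration
# code queries models through the ``orm`` argument rather than importing them.
# A hypothetical sketch of the pattern (names and filter are assumptions):
#
#     def forwards(self, orm):
#         SavedSearch = orm['sentry.SavedSearch']
#         print(SavedSearch.objects.filter(owner__isnull=True).count())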
| ifduyue/sentry | src/sentry/south_migrations/0310_auto__add_field_savedsearch_owner.py | Python | bsd-3-clause | 109,233 |
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
===========  ========================================================
Shorthand    full name
===========  ========================================================
homo         homogeneity score
compl        completeness score
v-meas       V measure
ARI          adjusted Rand index
AMI          adjusted mutual information
silhouette   silhouette coefficient
===========  ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
      '    time  inertia    homo   compl  v-meas     ARI AMI  silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
    print('% 9s   %.2fs    %i   %.3f   %.3f   %.3f   %.3f   %.3f    %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
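# Illustrative aside (not part of the original benchmark): the ARI used above
# is invariant to a permutation of cluster ids, scoring 1.0 for any exact
# relabeling of the ground truth and roughly 0.0 for a random labeling.
print("ARI, relabeled ground truth: %.3f"
      % metrics.adjusted_rand_score(labels, (labels + 1) % n_digits))
print("ARI, shuffled labels:        %.3f"
      % metrics.adjusted_rand_score(labels, np.random.permutation(labels)))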
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
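# (`np.c_` column-stacks the flattened grid coordinates into an
# (n_points, 2) array, matching the 2-D PCA space the model was fit in.)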
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| soulmachine/scikit-learn | examples/cluster/plot_kmeans_digits.py | Python | bsd-3-clause | 4,526 |
"""Tools for spectral analysis.
"""
import numpy as np
from scipy import fft as sp_fft
from . import _signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
    The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
    extended by Scargle [2]_ to find, and test the significance of, weak
    periodic signals with uneven temporal sampling.
    When *normalize* is False (default) the computed periodogram
    is unnormalized: it takes the value ``(A**2) * N/4`` for a harmonic
    signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be 1-D and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center measurement values by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
    The algorithm running time scales roughly as O(len(x) * len(freqs)),
    i.e. O(N^2) when the number of samples and the number of frequencies
    are both of order N.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
welch: Power spectral density by Welch's method
spectrogram: Spectrogram by Welch's method
csd: Cross spectral density by Welch's method
Examples
--------
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
First define some input parameters for the signal:
>>> A = 2.
>>> w0 = 1. # rad/sec
>>> nin = 150
>>> nout = 100000
Randomly generate sample times:
>>> x = rng.uniform(0, 10*np.pi, nin)
Plot a sine wave for the selected times:
>>> y = A * np.cos(w0*x)
Define the array of frequencies for which to compute the periodogram:
>>> w = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, w, normalize=True)
Now make a plot of the input data:
>>> fig, (ax_t, ax_w) = plt.subplots(2, 1, constrained_layout=True)
>>> ax_t.plot(x, y, 'b+')
>>> ax_t.set_xlabel('Time [s]')
Then plot the normalized periodogram:
>>> ax_w.plot(w, pgram)
>>> ax_w.set_xlabel('Angular frequency [rad/s]')
>>> ax_w.set_ylabel('Normalized amplitude')
>>> plt.show()
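    As a rough numerical check (an illustrative aside, not part of the
    original docstring), the unnormalized periodogram of this harmonic
    should peak near ``A**2 * nin / 4``:

    >>> A**2 * nin / 4  # expected peak height
    150.0
    >>> pgram_unnorm = signal.lombscargle(x, y, w)
    >>> float(pgram_unnorm.max())  # doctest: +SKIP
    150.1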
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.000985320699252543
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
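    # Map `nfft` onto the (nperseg, nfft) pair expected by `welch` below: a
    # periodogram uses a single segment, so nperseg spans the whole signal
    # unless `nfft` is shorter, in which case the signal is truncated along
    # `axis` and `nfft` is reset so that no zero padding takes place.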
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[tuple(s)]
nperseg = nfft
nfft = None
return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0,
nfft=nfft, detrend=detrend, return_onesided=return_onesided,
scaling=scaling, axis=axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
        `False`, return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
        'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
average : { 'mean', 'median' }, optional
Method to use when averaging periodograms. Defaults to 'mean'.
.. versionadded:: 1.2.0
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window, an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
If we now introduce a discontinuity in the signal, by increasing the
amplitude of a small portion of the signal by 50, we can see the
corruption of the mean average power spectral density, but using a
median average better estimates the normal behaviour.
>>> x[int(N//2):int(N//2)+10] *= 50.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median')
>>> plt.semilogy(f, Pxx_den, label='mean')
>>> plt.semilogy(f_med, Pxx_den_med, label='median')
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.legend()
>>> plt.show()
"""
freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
return_onesided=return_onesided, scaling=scaling,
axis=axis, average=average)
return freqs, Pxx.real
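# A minimal illustrative sketch (not part of scipy's public API): as the
# implementation above shows, Welch's PSD is the self cross-spectral
# density with its (numerically zero) imaginary part discarded.
def _demo_welch_is_self_csd():
    rng = np.random.default_rng(0)
    x = rng.normal(size=4096)
    _, Pxx = welch(x, nperseg=256)
    _, Pxx_csd = csd(x, x, nperseg=256)
    assert np.allclose(Pxx, Pxx_csd.real)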
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
r"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
    noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
        `False`, return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
        measured in V and `fs` is measured in Hz. Defaults to 'density'.
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
average : { 'mean', 'median' }, optional
Method to use when averaging periodograms. If the spectrum is
complex, the average is computed separately for the real and
imaginary parts. Defaults to 'mean'.
.. versionadded:: 1.2.0
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window, an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
if average == 'median':
# np.median must be passed real arrays for the desired result
if np.iscomplexobj(Pxy):
Pxy = (np.median(np.real(Pxy), axis=-1)
+ 1j * np.median(np.imag(Pxy), axis=-1))
Pxy /= _median_bias(Pxy.shape[-1])
else:
Pxy = np.median(Pxy, axis=-1) / _median_bias(Pxy.shape[-1])
elif average == 'mean':
Pxy = Pxy.mean(axis=-1)
else:
raise ValueError('average must be "median" or "mean", got %s'
% (average,))
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
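# A minimal illustrative sketch (not part of scipy's public API) of the
# 'median' branch above: per-segment periodograms are median-combined and
# divided by the small-sample bias factor `_median_bias`. It relies on the
# private helpers defined later in this module.
def _demo_median_average_by_hand():
    rng = np.random.default_rng(0)
    x = rng.normal(size=4096)
    _, _, segs = _spectral_helper(x, x, nperseg=256, mode='psd')
    manual = np.median(segs, axis=-1) / _median_bias(segs.shape[-1])
    _, Pxx_med = welch(x, nperseg=256, average='median')
    assert np.allclose(manual, Pxx_med)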
def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
        `False`, return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
    and on your requirements. In contrast to Welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> from scipy.fft import fftshift
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx, shading='gouraud')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
    Note: if using output that is not one-sided, use the following:
>>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False)
>>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0), shading='gouraud')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
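# A minimal illustrative sketch (not part of scipy's public API): with
# matching window, overlap, detrending and 'spectrum' scaling, the
# 'complex' mode above reproduces `stft` output when `stft` is called
# without boundary extension or padding.
def _demo_complex_spectrogram_matches_stft():
    rng = np.random.default_rng(0)
    x = rng.normal(size=2048)
    f1, t1, Sxx = spectrogram(x, window='hann', nperseg=256, noverlap=128,
                              detrend=False, scaling='spectrum',
                              mode='complex')
    f2, t2, Zxx = stft(x, window='hann', nperseg=256, noverlap=128,
                       boundary=None, padded=False)
    assert np.allclose(f1, f2) and np.allclose(t1, t2)
    assert np.allclose(Sxx, Zxx)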
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""Check whether the Constant OverLap Add (COLA) constraint is met.
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
        The allowed deviation of a bin's weighted sum from the median bin
        sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, it is sufficient that the signal windowing obeys the constraint of
"Constant OverLap Add" (COLA). This ensures that every point in the input
data is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.windows.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.windows.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.windows.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, str) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
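# A minimal illustrative sketch (not part of scipy's public API) of the
# test above: slide the window by `step` samples, sum the overlapping
# copies, and check that every output sample receives the same weight.
def _demo_cola_bin_sums():
    win = get_window('hann', 120, fftbins=True)
    step = 120 - 60  # nperseg - noverlap
    total = np.zeros(step)
    for start in range(0, 120, step):
        total += win[start:start + step]
    # Constant overlap-add: the summed weights are flat for this window.
    assert np.allclose(total, total[0])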
def check_NOLA(window, nperseg, noverlap, tol=1e-10):
r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met.
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
        Tolerance; the summed squared window weights must exceed this value
        at every point for the constraint to be considered met.
Returns
-------
verdict : bool
`True` if chosen combination satisfies the NOLA constraint within
`tol`, `False` otherwise
See Also
--------
check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "nonzero
overlap add" (NOLA):
.. math:: \sum_{t}w^{2}[n-tH] \ne 0
for all :math:`n`, where :math:`w` is the window function, :math:`t` is the
frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` -
`noverlap`).
This ensures that the normalization factors in the denominator of the
overlap-add inversion equation are not zero. Only very pathological windows
will fail the NOLA constraint.
.. versionadded:: 1.2.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm NOLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_NOLA(signal.windows.boxcar(100), 100, 75)
True
NOLA is also true for 25% (1/4) overlap:
>>> signal.check_NOLA(signal.windows.boxcar(100), 100, 25)
True
"Symmetrical" Hann window (for filter design) is also NOLA:
>>> signal.check_NOLA(signal.windows.hann(120, sym=True), 120, 60)
True
As long as there is overlap, it takes quite a pathological window to fail
NOLA:
>>> w = np.ones(64, dtype="float")
>>> w[::2] = 0
>>> signal.check_NOLA(w, 64, 32)
False
If there is not enough overlap, a window with zeros at the ends will not
work:
>>> signal.check_NOLA(signal.windows.hann(64), 64, 0)
False
>>> signal.check_NOLA(signal.windows.hann(64), 64, 1)
False
>>> signal.check_NOLA(signal.windows.hann(64), 64, 2)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg')
if noverlap < 0:
raise ValueError('noverlap must be a nonnegative integer')
noverlap = int(noverlap)
if isinstance(window, str) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]**2
return np.min(binsums) > tol
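# A minimal illustrative sketch (not part of scipy's public API): the NOLA
# test above is the squared-window version of the COLA bin sum, and only
# requires the summed energy to stay above zero rather than be constant.
def _demo_nola_bin_sums():
    win = get_window('hann', 64, fftbins=True)
    step = 64 - 32  # nperseg - noverlap
    total = np.zeros(step)
    for start in range(0, 64, step):
        total += win[start:start + step] ** 2
    assert total.min() > 1e-10  # same verdict as check_NOLA(win, 64, 32)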
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
        `False`, return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Nonzero
OverLap Add" (NOLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop
size :math:`H` = `nperseg - noverlap`, the windowed frame at time index
:math:`t` is given by
.. math:: x_{t}[n]=x[n]w[n-tH]
The overlap-add (OLA) reconstruction equation is given by
.. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
The NOLA constraint ensures that every normalization term that appears
    in the denominator of the OLA reconstruction equation is nonzero. Whether a
choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can
be tested with `check_NOLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = rng.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud')
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
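# A minimal illustrative sketch (not part of scipy's public API): with the
# default Hann window and 50% overlap the NOLA condition holds, so `istft`
# recovers the input up to floating-point error.
def _demo_stft_round_trip():
    rng = np.random.default_rng(0)
    x = rng.normal(size=2048)
    _, _, Zxx = stft(x, fs=1.0, nperseg=256)
    _, x_rec = istft(Zxx, fs=1.0, nperseg=256)
    assert np.allclose(x, x_rec[:x.size])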
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is `True`,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "nonzero
overlap add" (NOLA):
.. math:: \sum_{t}w^{2}[n-tH] \ne 0
This ensures that the normalization factors that appear in the denominator
of the overlap-add reconstruction equation
.. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
are not zero. The NOLA constraint can be checked with the `check_NOLA`
function.
An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = rng.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud')
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, str) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
norm = norm[..., nperseg//2:-(nperseg//2)]
# Divide out normalization where non-tiny
if np.sum(norm > 1e-10) != len(norm):
warnings.warn("NOLA condition failed, STFT may not be invertible")
x /= np.where(norm > 1e-10, norm, 1.0)
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.moveaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
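# A minimal illustrative sketch (not part of scipy's public API) of the
# overlap-add reconstruction loop above: already-windowed frames are summed
# into the output and divided by the accumulated squared window, which is
# the NOLA denominator.
def _demo_overlap_add(frames, win, nstep):
    # `frames` is a hypothetical (nseg, nperseg) array of windowed frames.
    nseg, nperseg = frames.shape
    n = nperseg + (nseg - 1) * nstep
    x = np.zeros(n)
    norm = np.zeros(n)
    for ii in range(nseg):
        x[ii * nstep:ii * nstep + nperseg] += frames[ii] * win
        norm[ii * nstep:ii * nstep + nperseg] += win ** 2
    return x / np.where(norm > 1e-10, norm, 1.0)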
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
    Estimate the magnitude squared coherence, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
    noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window, an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
axis=axis)
_, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
nfft=nfft, detrend=detrend, axis=axis)
_, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
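# A minimal illustrative sketch (not part of scipy's public API): the
# coherence of a signal with itself is identically one, since Pxy then
# reduces to Pxx and the ratio above cancels.
def _demo_self_coherence_is_unity():
    rng = np.random.default_rng(0)
    x = rng.normal(size=4096)
    _, Cxy = coherence(x, x, nperseg=256)
    assert np.allclose(Cxy, 1.0)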
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd', boundary=None,
padded=False):
"""Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
        `False`, return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
        Defaults to 'density'.
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
    mode : str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError as e:
raise ValueError('x and y cannot be broadcast together.') from e
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.moveaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.moveaxis(x, axis, -1)
if not same_data and y.ndim > 1:
y = np.moveaxis(y, axis, -1)
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
        # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return _signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.moveaxis(d, -1, axis)
d = detrend(d)
return np.moveaxis(d, axis, -1)
else:
detrend_func = detrend
if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = sp_fft.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = sp_fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.moveaxis(result, -1, axis)
return freqs, time, result
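# A minimal illustrative sketch (not part of scipy's public API) of the
# 'density' scaling above: for a boxcar window, one full-length segment and
# fs = 1, the periodogram equals |rfft(x)|**2 / (fs * sum(win**2)) with the
# interior bins doubled to fold in the negative frequencies.
def _demo_density_scaling_by_hand():
    rng = np.random.default_rng(0)
    x = rng.normal(size=256)
    _, Pxx = periodogram(x, fs=1.0, window='boxcar', detrend=False)
    X = sp_fft.rfft(x)
    manual = (np.conjugate(X) * X).real / x.size  # fs * sum(win**2) == 256
    manual[1:-1] *= 2  # even nfft: all but DC and Nyquist are paired
    assert np.allclose(Pxx, manual)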
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
`scipy.signal._spectral_helper`.
This is a helper function that does the main FFT calculation for
    `_spectral_helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# https://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = sp_fft.fft
else:
result = result.real
func = sp_fft.rfft
result = func(result, n=nfft)
return result
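# A minimal illustrative sketch (not part of scipy's public API): the
# shape/strides pair above builds the same overlapping segments as numpy's
# sliding_window_view (assumes numpy >= 1.20) subsampled by `step`.
def _demo_strided_segments():
    x = np.arange(10.0)
    nperseg, noverlap = 4, 2
    step = nperseg - noverlap
    shape = ((x.shape[-1] - noverlap) // step, nperseg)
    strides = (step * x.strides[-1], x.strides[-1])
    segs = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
    view = np.lib.stride_tricks.sliding_window_view(x, nperseg)[::step]
    assert np.array_equal(segs, view)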
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
        If window is specified by a string or tuple and nperseg is not
        specified, nperseg is set to the default of 256 and a window of
        that length is returned.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
    input_length : int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
        window. If the function was called with a string or tuple, this
        holds the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
"""
# parse window; if array like, then set nperseg = win.shape
    if isinstance(window, (str, tuple)):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
            warnings.warn('nperseg = {0:d} is greater than input length '
                          '= {1:d}, using nperseg = {1:d}'
                          .format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
        if nperseg is None:
            nperseg = win.shape[0]
        elif nperseg != win.shape[0]:
            raise ValueError("value specified for nperseg is different"
                             " from length of window")
return win, nperseg
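# A minimal illustrative sketch (not part of scipy's public API) of the
# triage rules above: a string window without nperseg falls back to 256
# samples, while an array window dictates nperseg itself.
def _demo_triage_rules():
    win, nperseg = _triage_segments('hann', None, input_length=1000)
    assert nperseg == 256 and win.shape == (256,)
    win, nperseg = _triage_segments(np.ones(128), None, input_length=1000)
    assert nperseg == 128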
def _median_bias(n):
"""
Returns the bias of the median of a set of periodograms relative to
the mean.
See Appendix B from [1]_ for details.
Parameters
----------
n : int
Numbers of periodograms being averaged.
Returns
-------
bias : float
Calculated bias.
References
----------
.. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton.
"FINDCHIRP: an algorithm for detection of gravitational waves from
inspiraling compact binaries", Physical Review D 85, 2012,
:arxiv:`gr-qc/0509116`
"""
ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
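# A minimal illustrative sketch (not part of scipy's public API) checking
# the closed form above by simulation: periodogram bins of white noise are
# exponentially distributed, and the median of n such bins underestimates
# their mean by exactly this factor.
def _demo_median_bias_monte_carlo():
    rng = np.random.default_rng(0)
    n = 9
    draws = rng.exponential(size=(200000, n))  # unit-mean samples
    empirical = np.median(draws, axis=-1).mean()
    assert abs(empirical - _median_bias(n)) < 0.01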
| zerothi/scipy | scipy/signal/_spectral_py.py | Python | bsd-3-clause | 74,381 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import types
import binascii
import base64
from twisted.python import log
from twisted.application import service, strports
from twisted.cred import checkers, portal
from twisted.conch import manhole, telnet, manhole_ssh, checkers as conchc
from twisted.conch.insults import insults
from twisted.internet import protocol
from buildbot.util import ComparableMixin
from zope.interface import implements # requires Twisted-2.0 or later
# makeTelnetProtocol and _TelnetRealm are for the TelnetManhole
class makeTelnetProtocol:
# this curries the 'portal' argument into a later call to
# TelnetTransport()
def __init__(self, portal):
self.portal = portal
def __call__(self):
auth = telnet.AuthenticatingTelnetProtocol
return telnet.TelnetTransport(auth, self.portal)
class _TelnetRealm:
implements(portal.IRealm)
def __init__(self, namespace_maker):
self.namespace_maker = namespace_maker
def requestAvatar(self, avatarId, *interfaces):
if telnet.ITelnetProtocol in interfaces:
namespace = self.namespace_maker()
p = telnet.TelnetBootstrapProtocol(insults.ServerProtocol,
manhole.ColoredManhole,
namespace)
return (telnet.ITelnetProtocol, p, lambda: None)
raise NotImplementedError()
class chainedProtocolFactory:
# this curries the 'namespace' argument into a later call to
# chainedProtocolFactory()
def __init__(self, namespace):
self.namespace = namespace
def __call__(self):
return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
"""Accept connections using SSH keys from a given file.
SSHPublicKeyDatabase takes the username that the prospective client has
requested and attempts to get a ~/.ssh/authorized_keys file for that
username. This requires root access, so it isn't as useful as you'd
like.
Instead, this subclass looks for keys in a single file, given as an
argument. This file is typically kept in the buildmaster's basedir. The
file should have 'ssh-dss ....' lines in it, just like authorized_keys.
"""
def __init__(self, authorized_keys_file):
self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
    def checkKey(self, credentials):
        # make sure the keys file is closed even if a line fails to parse
        f = open(self.authorized_keys_file)
        try:
            for l in f.readlines():
                l2 = l.split()
                if len(l2) < 2:
                    continue
                try:
                    if base64.decodestring(l2[1]) == credentials.blob:
                        return 1
                except binascii.Error:
                    continue
            return 0
        finally:
            f.close()
class _BaseManhole(service.MultiService):
"""This provides remote access to a python interpreter (a read/exec/print
loop) embedded in the buildmaster via an internal SSH server. This allows
detailed inspection of the buildmaster state. It is of most use to
buildbot developers. Connect to this by running an ssh client.
"""
def __init__(self, port, checker, using_ssh=True):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@type checker: an object providing the
L{twisted.cred.checkers.ICredentialsChecker} interface
@param checker: if provided, this checker is used to authenticate the
client instead of using the username/password scheme. You must either
provide a username/password or a Checker. Some useful values are::
import twisted.cred.checkers as credc
import twisted.conch.checkers as conchc
c = credc.AllowAnonymousAccess # completely open
c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
@type using_ssh: bool
@param using_ssh: If True, accept SSH connections. If False, accept
regular unencrypted telnet connections.
"""
# unfortunately, these don't work unless we're running as root
#c = credc.PluggableAuthenticationModulesChecker: PAM
#c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
# and I can't get UNIXPasswordDatabase to work
service.MultiService.__init__(self)
if type(port) is int:
port = "tcp:%d" % port
self.port = port # for comparison later
self.checker = checker # to maybe compare later
def makeNamespace():
# close over 'self' so we can get access to .parent later
master = self.parent
namespace = {
'master': master,
'status': master.getStatus(),
'show': show,
}
return namespace
def makeProtocol():
namespace = makeNamespace()
p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
return p
self.using_ssh = using_ssh
if using_ssh:
r = manhole_ssh.TerminalRealm()
r.chainedProtocolFactory = makeProtocol
p = portal.Portal(r, [self.checker])
f = manhole_ssh.ConchFactory(p)
else:
r = _TelnetRealm(makeNamespace)
p = portal.Portal(r, [self.checker])
f = protocol.ServerFactory()
f.protocol = makeTelnetProtocol(p)
s = strports.service(self.port, f)
s.setServiceParent(self)
def startService(self):
service.MultiService.startService(self)
if self.using_ssh:
via = "via SSH"
else:
via = "via telnet"
log.msg("Manhole listening %s on port %s" % (via, self.port))
class TelnetManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts unencrypted (telnet) connections, and requires a
    username and password to authorize access. You are encouraged to use the
encrypted ssh-based manhole classes instead."""
compare_attrs = ["port", "username", "password"]
def __init__(self, port, username, password):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param username:
@param password: username= and password= form a pair of strings to
use when authenticating the remote user.
"""
self.username = username
self.password = password
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
c.addUser(username, password)
_BaseManhole.__init__(self, port, c, using_ssh=False)
class PasswordManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts encrypted (ssh) connections, and requires a
username and password to authorize access.
"""
compare_attrs = ["port", "username", "password"]
def __init__(self, port, username, password):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param username:
@param password: username= and password= form a pair of strings to
use when authenticating the remote user.
"""
self.username = username
self.password = password
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
c.addUser(username, password)
_BaseManhole.__init__(self, port, c)
class AuthorizedKeysManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts ssh connections, and requires that the
prospective client have an ssh private key that matches one of the public
keys in our authorized_keys file. It is created with the name of a file
that contains the public keys that we will accept."""
compare_attrs = ["port", "keyfile"]
def __init__(self, port, keyfile):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param keyfile: the name of a file (relative to the buildmaster's
basedir) that contains SSH public keys of authorized
users, one per line. This is the exact same format
as used by sshd in ~/.ssh/authorized_keys .
"""
# TODO: expanduser this, and make it relative to the buildmaster's
# basedir
self.keyfile = keyfile
c = AuthorizedKeysChecker(keyfile)
_BaseManhole.__init__(self, port, c)
class ArbitraryCheckerManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts ssh connections, but uses an arbitrary
user-supplied 'checker' object to perform authentication."""
compare_attrs = ["port", "checker"]
def __init__(self, port, checker):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param checker: an instance of a twisted.cred 'checker' which will
perform authentication
"""
_BaseManhole.__init__(self, port, checker)
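# A minimal usage sketch (hypothetical master.cfg fragment; everything
# except the manhole classes above is assumed, not defined in this module):
#
#   from buildbot import manhole
#   c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
#                                          "admin", "passwd")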
## utility functions for the manhole
def show(x):
"""Display the data attributes of an object in a readable format"""
print "data attributes of %r" % (x,)
names = dir(x)
maxlen = max([0] + [len(n) for n in names])
for k in names:
v = getattr(x,k)
t = type(v)
if t == types.MethodType: continue
if k[:2] == '__' and k[-2:] == '__': continue
if t is types.StringType or t is types.UnicodeType:
if len(v) > 80 - maxlen - 5:
                v = repr(v[:80 - maxlen - 5]) + "..."
elif t in (types.IntType, types.NoneType):
v = str(v)
        elif t in (types.ListType, types.TupleType, types.DictType):
v = "%s (%d elements)" % (v, len(v))
else:
v = str(t)
print "%*s : %s" % (maxlen, k, v)
return x
| eunchong/build | third_party/buildbot_8_4p1/buildbot/manhole.py | Python | bsd-3-clause | 11,523 |
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetMplsTTLAction,
DecMplsTTLAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_MPLS_UCAST
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_42():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
    except KeyError:
print ("Failed to get Controller device attributes")
exit(0)
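    # For reference, a cfg.yml matching the keys read above might look
    # like this (placeholder values, not from this repository):
    #
    #   ctrlIpAddr: 172.22.18.70
    #   ctrlPortNum: 8181
    #   ctrlUname: admin
    #   ctrlPswd: admin
    #   nodeName: openflow:1
    #   rundelay: 5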
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 42 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify MPLS TTL example1"
priority = 900
cookie = 1300
match_in_port = 3
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_mod_mpls_ttl = 2
act_out_port = 112
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Set MPLS TTL (%s)\n"
" Output (%s)" %
(act_mod_mpls_ttl, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetMplsTTLAction(action_order)
action.set_ttl(act_mod_mpls_ttl)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify MPLS TTL example2"
priority = 900
cookie = 1300
match_in_port = 112
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_out_port = 3
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Decrement MPLS TTL\n"
" Output (%s)" %
(act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = DecMplsTTLAction(action_order)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry2.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_42()
| tnadeau/pybvc | samples/sampleopenflow/demos/demo42.py | Python | bsd-3-clause | 8,783 |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
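# Usage sketch for the two helpers above (illustrative; ``Meta`` stands
# for any metaclass you define):
#
#   class Base(with_metaclass(Meta, object)):
#       pass
#
#   @add_metaclass(Meta)
#   class Other(object):
#       pass
#
# Both forms produce a class whose metaclass is Meta on Python 2 and 3.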
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
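# For example (illustrative):
#
#   @python_2_unicode_compatible
#   class Greeting(object):
#       def __str__(self):
#           return u'caf\xe9'
#
# On Python 2 this installs __unicode__ and a UTF-8-encoding __str__;
# on Python 3 the class is returned unchanged.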
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| phobson/seaborn | seaborn/external/six.py | Python | bsd-3-clause | 30,099 |
import sqlalchemy_utils
from babel import Locale
from wtforms import Form
from tests import MultiDict
from wtforms_alchemy import CountryField
sqlalchemy_utils.i18n.get_locale = lambda: Locale('en')
class TestCountryField(object):
field_class = CountryField
def init_form(self, **kwargs):
class TestForm(Form):
test_field = self.field_class(**kwargs)
self.form_class = TestForm
return self.form_class
def setup_method(self, method):
self.valid_countries = [
'US',
'SA',
'FI'
]
self.invalid_countries = [
'unknown',
]
def test_valid_countries(self):
form_class = self.init_form()
for country in self.valid_countries:
form = form_class(MultiDict(test_field=country))
form.validate()
assert len(form.errors) == 0
def test_invalid_countries(self):
form_class = self.init_form()
for country in self.invalid_countries:
form = form_class(MultiDict(test_field=country))
form.validate()
assert len(form.errors['test_field']) == 2
| quantus/wtforms-alchemy | tests/test_country_field.py | Python | bsd-3-clause | 1,170 |
"""Functions to construct sparse matrices
"""
__docformat__ = "restructuredtext en"
__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand']
from warnings import warn
import numpy as np
from sputils import upcast
from csr import csr_matrix
from csc import csc_matrix
from bsr import bsr_matrix
from coo import coo_matrix
from lil import lil_matrix
from dia import dia_matrix
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : format of the result (e.g. "csr")
By default (format=None) an appropriate sparse matrix
format is returned. This choice is subject to change.
See Also
--------
dia_matrix : the sparse DIAgonal format.
Examples
--------
    >>> from numpy import array
    >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
>>> diags = array([0,-1,2])
>>> spdiags(data, diags, 4, 4).todense()
matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : integer
Shape of the identity matrix.
dtype :
Data type of the matrix
format : string
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> identity(3).todense()
matrix([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<type 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if format in ['csr','csc']:
indptr = np.arange(n+1, dtype=np.intc)
indices = np.arange(n, dtype=np.intc)
data = np.ones(n, dtype=dtype)
        cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
row = np.arange(n, dtype=np.intc)
col = np.arange(n, dtype=np.intc)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
elif format == 'dia':
data = np.ones(n, dtype=dtype)
diags = [0]
return dia_matrix((data,diags), shape=(n,n))
else:
return identity(n, dtype=dtype, format='csr').asformat(format)
def eye(m, n, k=0, dtype='d', format=None):
"""eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
"""
m,n = int(m),int(n)
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
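# For example, eye(3, 4, k=1) places ones on the first superdiagonal,
# i.e. entries (0, 1), (1, 2) and (2, 3) are 1 and all others are 0.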
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : string
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
    >>> from numpy import array
    >>> from scipy.sparse import csr_matrix, kron
    >>> A = csr_matrix(array([[0,2],[5,0]]))
>>> B = csr_matrix(array([[1,2],[3,4]]))
>>> kron(A,B).todense()
matrix([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> kron(A,[[1,2],[3,4]]).todense()
matrix([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
#B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix( output_shape )
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
#use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix( output_shape )
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n) respectively.
Parameters
----------
A
square matrix
B
square matrix
format : string
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
Examples
--------
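    For example, the Kronecker sum of the 2x2 exchange matrix with itself
    is the adjacency matrix of a 4-cycle (a small hand-checked case):

    >>> from scipy.sparse import coo_matrix, kronsum
    >>> A = coo_matrix([[0, 1], [1, 0]])
    >>> kronsum(A, A).todense()
    matrix([[0, 1, 1, 0],
            [1, 0, 0, 1],
            [1, 0, 0, 1],
            [0, 1, 1, 0]])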
"""
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(identity(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, identity(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) #since L + R is not always same format
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : string
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
    >>> from scipy.sparse import coo_matrix, hstack
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5],[6]])
>>> hstack( [A,B] ).todense()
matrix([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : string
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5,6]])
>>> vstack( [A,B] ).todense()
matrix([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([ [b] for b in blocks ], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks
grid of sparse matrices with compatible shapes
an entry of None implies an all-zero matrix
format : sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
Examples
--------
>>> from scipy.sparse import coo_matrix, bmat
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5],[6]])
>>> C = coo_matrix([[7]])
>>> bmat( [[A,B],[None,C]] ).todense()
matrix([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
>>> bmat( [[A,None],[None,C]] ).todense()
matrix([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
"""
blocks = np.asarray(blocks, dtype='object')
if np.rank(blocks) != 2:
raise ValueError('blocks must have rank 2')
M,N = blocks.shape
block_mask = np.zeros(blocks.shape, dtype=np.bool)
brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc)
bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc)
# convert everything to COO format
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = coo_matrix(blocks[i,j])
blocks[i,j] = A
block_mask[i,j] = True
if brow_lengths[i] == 0:
brow_lengths[i] = A.shape[0]
else:
if brow_lengths[i] != A.shape[0]:
raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)
if bcol_lengths[j] == 0:
bcol_lengths[j] = A.shape[1]
else:
if bcol_lengths[j] != A.shape[1]:
raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)
# ensure that at least one value in each row and col is not None
if brow_lengths.min() == 0:
raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() )
if bcol_lengths.min() == 0:
raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() )
nnz = sum([ A.nnz for A in blocks[block_mask] ])
if dtype is None:
dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) )
row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))
data = np.empty(nnz, dtype=dtype)
row = np.empty(nnz, dtype=np.intc)
col = np.empty(nnz, dtype=np.intc)
nnz = 0
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = blocks[i,j]
data[nnz:nnz + A.nnz] = A.data
row[nnz:nnz + A.nnz] = A.row
col[nnz:nnz + A.nnz] = A.col
row[nnz:nnz + A.nnz] += row_offsets[i]
col[nnz:nnz + A.nnz] += col_offsets[j]
nnz += A.nnz
shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def rand(m, n, density=0.01, format="coo", dtype=None):
"""Generate a sparse matrix of the given shape and density with uniformely
distributed values.
Parameters
----------
m, n: int
shape of the matrix
density: real
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format: str
sparse matrix format.
dtype: dtype
type of the returned matrix values.
Notes
-----
Only float types are supported for now.
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and dtype not in [np.float32, np.float64, np.longdouble]:
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
# XXX: sparse uses intc instead of intp...
tp = np.intp
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such that the product of dimensions
is greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = long(density * m * n)
# Generate a few more values than k so that we can get unique values
# afterwards.
# XXX: one could be smarter here
mlow = 5
fac = 1.02
gk = min(k + mlow, fac * k)
def _gen_unique_rand(_gk):
id = np.random.rand(_gk)
return np.unique(np.floor(id * mn))[:k]
id = _gen_unique_rand(gk)
while id.size < k:
gk *= 1.05
id = _gen_unique_rand(gk)
j = np.floor(id * 1. / m).astype(tp)
i = (id - j * m).astype(tp)
vals = np.random.rand(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
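# For example, rand(3, 4, density=0.25) returns a 3x4 COO matrix with
# k = long(0.25 * 12) = 3 nonzero values drawn uniformly from [0, 1).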
| lesserwhirls/scipy-cwt | scipy/sparse/construct.py | Python | bsd-3-clause | 12,885 |
from bokeh.util.deprecate import deprecated_module
deprecated_module('bokeh.properties', '0.11', 'use bokeh.core.properties instead')
del deprecated_module
from .core.properties import * # NOQA
| phobson/bokeh | bokeh/properties.py | Python | bsd-3-clause | 195 |
#!/usr/bin/env python2.7
from bioblend.galaxy import GalaxyInstance
import requests
import datetime
import argparse
requests.packages.urllib3.disable_warnings()
def parse_args():
    args = argparse.ArgumentParser(
        description="Rename history items using a tabular file.\n"
                    "Example usage: python rename_hist_items.py -url misssissippi.snv.jussieu.fr "
                    "-key $your_api_key -hid $your_history_id -table $your_tabular_file\n"
                    "See test-data/sample_table.tab for an example file.")
args.add_argument("-url", "--galaxy_url", required=True, help="url of galaxy instance")
args.add_argument("-key", "--api_key", required=True, help="api key for galaxy instance" )
args.add_argument("-hid", "--history_id", required=True, help="History id of hitory containing files to be renamed")
args.add_argument("-table", "--rename_table", required=True, type=file,
help="tab-seperated file with first column current filename,\
and second column the desired name")
return args.parse_args()
def return_datetime(string_representation):
"""
    Parse a '<date>T<time>' timestamp string into a datetime object,
    e.g. to find the most recently modified history.
    Currently unused; may be used in the future.
"""
date, time = string_representation.split('T')
return datetime.datetime.strptime(date + ' ' + time, "%Y-%m-%d %H:%M:%S.%f")
def get_rename_list(rename_table):
return [(line.split('\t')[0],line.split('\t')[1].strip()) for line in rename_table]
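# The rename table is plain tab-separated text, one mapping per line:
#   <current name><TAB><new name>
# e.g. the line "old.fastq<TAB>sample1.fastq" would rename old.fastq to
# sample1.fastq (illustrative names, not taken from the sample file).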
def get_instance(url, api_key):
return GalaxyInstance(url, api_key)
def get_name_id_d(gi, hid):
return {dataset[u'name']:dataset[u'id'] for dataset in gi.histories.show_history(hid, contents=True)}
def update_names(gi, hid, rename_list, name_id_d ):
for old_name, new_name in rename_list:
dataset_id = name_id_d[old_name]
gi.histories.update_dataset(history_id=hid, dataset_id=dataset_id, name=new_name)
def main():
args = parse_args()
hid = args.history_id
rename_list = get_rename_list(args.rename_table)
gi = get_instance(args.galaxy_url, args.api_key)
name_id_d = get_name_id_d(gi, hid)
    update_names(gi, hid, rename_list, name_id_d)
if __name__ == "__main__":
main()
| ARTbio/tools-artbio | scripts/helper_scripts/rename_history_items/rename_hist_items.py | Python | mit | 2,334 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .auto_rest_url_test_service import AutoRestUrlTestService, AutoRestUrlTestServiceConfiguration
from .version import VERSION
__all__ = [
'AutoRestUrlTestService',
'AutoRestUrlTestServiceConfiguration'
]
__version__ = VERSION
| brodyberg/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Url/autoresturltestservice/__init__.py | Python | mit | 714 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.unit.fixture_data import base
class V1(base.Fixture):
base_url = 'os-availability-zone'
zone_info_key = 'availabilityZoneInfo'
zone_name_key = 'zoneName'
zone_state_key = 'zoneState'
def setUp(self):
super(V1, self).setUp()
get_os_availability_zone = {
self.zone_info_key: [
{
self.zone_name_key: "zone-1",
self.zone_state_key: {"available": True},
"hosts": None
},
{
self.zone_name_key: "zone-2",
self.zone_state_key: {"available": False},
"hosts": None
}
]
}
self.requests.register_uri('GET', self.url(),
json=get_os_availability_zone,
headers=self.json_headers)
get_os_zone_detail = {
self.zone_info_key: [
{
self.zone_name_key: "zone-1",
self.zone_state_key: {"available": True},
"hosts": {
"fake_host-1": {
"nova-compute": {
"active": True,
"available": True,
"updated_at": '2012-12-26 14:45:25'
}
}
}
},
{
self.zone_name_key: "internal",
self.zone_state_key: {"available": True},
"hosts": {
"fake_host-1": {
"nova-sched": {
"active": True,
"available": True,
"updated_at": '2012-12-26 14:45:25'
}
},
"fake_host-2": {
"nova-network": {
"active": True,
"available": False,
"updated_at": '2012-12-26 14:45:24'
}
}
}
},
{
self.zone_name_key: "zone-2",
self.zone_state_key: {"available": False},
"hosts": None
}
]
}
self.requests.register_uri('GET', self.url('detail'),
json=get_os_zone_detail,
headers=self.json_headers)
| sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/novaclient/tests/unit/fixture_data/availability_zones.py | Python | mit | 3,264 |
#!/usr/bin/env python
"""
C.5 Classes, Packages, and Page Styles (p176)
"""
import codecs, sys, os
from plasTeX import Command, Environment, DimenCommand, Token
from plasTeX.Logging import getLogger
# Put the plasTeX packages into the path
from plasTeX import Packages
sys.path.append(os.path.dirname(Packages.__file__))
del Packages
log = getLogger()
status = getLogger('status')
class PackageLoader(Command):
extension = '.sty'
def load(self, tex, file, options={}):
try:
self.ownerDocument.context.loadPackage(tex, file+self.extension, options)
except Exception, msg:
log.error('Could not load package "%s": %s' % (file, msg))
#
# C.5.1 Document Class
#
class documentclass(PackageLoader):
args = '[ options:dict ] name:str'
extension = '.cls'
def invoke(self, tex):
a = self.parse(tex)
self.load(tex, a['name'], a['options'])
packages = self.ownerDocument.context.packages
if a['name'] in packages:
packages['documentclass'] = packages[a['name']]
class documentstyle(documentclass):
pass
#
# Style Parameters
#
class bibindent(DimenCommand):
value = DimenCommand.new(0)
class columnsep(DimenCommand):
value = DimenCommand.new(0)
class columnseprule(DimenCommand):
value = DimenCommand.new(0)
class mathindent(DimenCommand):
value = DimenCommand.new(0)
#
# C.5.2 Packages
#
class usepackage(PackageLoader):
args = '[ options:dict ] names:list:str'
extension = '.sty'
def invoke(self, tex):
# Allow & in option names (this happens in natbib)
catcode = self.ownerDocument.context.whichCode('&')
self.ownerDocument.context.catcode('&', Token.CC_LETTER)
a = self.parse(tex)
self.ownerDocument.context.catcode('&', catcode)
for file in a['names']:
self.load(tex, file, a['options'])
class RequirePackage(usepackage):
pass
#
# C.5.3 Page Styles
#
class pagestyle(Command):
args = 'style:str'
class thispagestyle(pagestyle):
pass
class markright(Command):
args = 'text'
class markboth(Command):
args = 'left right'
class pagenumbering(Command):
args = 'style:str'
class twocolumn(Command):
args = '[ text ]'
class onecolumn(Command):
pass
#
# Style Parameters
#
# Figure C.3: Page style parameters
class paperheight(DimenCommand):
value = DimenCommand.new('11in')
class paperwidth(DimenCommand):
value = DimenCommand.new('8.5in')
class oddsidemargin(DimenCommand):
value = DimenCommand.new('1in')
class evensidemargin(DimenCommand):
value = DimenCommand.new('1in')
class textheight(DimenCommand):
value = DimenCommand.new('9in')
class textwidth(DimenCommand):
value = DimenCommand.new('6.5in')
class topmargin(DimenCommand):
value = DimenCommand.new(0)
class headheight(DimenCommand):
value = DimenCommand.new('0.5in')
class headsep(DimenCommand):
value = DimenCommand.new('0.25in')
class footskip(DimenCommand):
value = DimenCommand.new('0.5in')
class marginparsep(DimenCommand):
value = DimenCommand.new('0.25in')
class marginparwidth(DimenCommand):
value = DimenCommand.new('0.75in')
class topskip(DimenCommand):
value = DimenCommand.new(0)
#
# C.5.4 The Title Page and Abstract
#
class maketitle(Command):
blockType = True
class title(Command):
args = '[ toc ] self'
def invoke(self, tex):
Command.invoke(self, tex)
if not self.ownerDocument.userdata.has_key('title'):
self.ownerDocument.userdata['title'] = self
class author(Command):
args = 'self'
def invoke(self, tex):
Command.invoke(self, tex)
userdata = self.ownerDocument.userdata
if userdata.get('author') is None:
userdata['author'] = []
userdata['author'].append(self)
class date(Command):
args = 'self'
def invoke(self, tex):
Command.invoke(self, tex)
self.ownerDocument.userdata['date'] = self
class thanks(Command):
args = 'self'
def invoke(self, tex):
Command.invoke(self, tex)
self.ownerDocument.userdata['thanks'] = self
class abstract(Environment):
blockType = True
class titlepage(Environment):
blockType = True
#
# Extras...
#
class ProvidesPackage(Command):
args = 'name [ message ]'
class ProvidesClass(Command):
pass
class DeclareOption(Command):
args = 'name:str [ default:nox ] value:nox'
class PackageWarning(Command):
args = 'name:str message:str'
class ProcessOptions(Command):
args = '*'
class LoadClass(usepackage):
args = '[ options:dict ] names:list:str'
extension = '.cls'
class NeedsTeXFormat(Command):
args = 'name:str date:str'
class InputIfFileExists(Command):
args = 'file:str true:nox false:nox'
def invoke(self, tex):
a = self.parse(tex)
try:
tex.input(tex.kpsewhich(a['file']))
tex.pushTokens(a['true'])
except (IOError, OSError):
tex.pushTokens(a['false'])
return []
class IfFileExists(Command):
args = 'file:str true:nox false:nox'
def invoke(self, tex):
a = self.parse(tex)
try:
tex.kpsewhich(a['file'])
tex.pushTokens(a['true'])
except (IOError, OSError):
tex.pushTokens(a['false'])
return []
| themutt/plastex | plasTeX/Base/LaTeX/Packages.py | Python | mit | 5,361 |
"""
https://webdocs.cs.ualberta.ca/~sutton/MountainCar/MountainCar1.cp
"""
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.low = np.array([self.min_position, -self.max_speed])
self.high = np.array([self.max_position, self.max_speed])
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self._seed()
self.reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action-1)*0.001 + math.cos(3*position)*(-0.0025)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position==self.min_position and velocity<0): velocity = 0
done = bool(position >= self.goal_position)
reward = -1.0
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def _reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
return np.sin(3 * xs)*.45+.55
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width/world_width
carwidth=40
carheight=20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs-self.min_position)*scale, ys*scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l,r,t,b = -carwidth/2, carwidth/2, carheight, 0
car = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight/2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(rendering.Transform(translation=(carwidth/4,clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight/2.5)
backwheel.add_attr(rendering.Transform(translation=(-carwidth/4,clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position-self.min_position)*scale
flagy1 = self._height(self.goal_position)*scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2-10), (flagx+25, flagy2-5)])
flag.set_color(.8,.8,0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation((pos-self.min_position)*scale, self._height(pos)*scale)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array = mode=='rgb_array')
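# Minimal random-agent rollout (an illustrative sketch, not part of the
# original module; it assumes the old-style gym API of this era, in which
# Env.reset()/Env.step() dispatch to the _reset()/_step() methods above):
if __name__ == "__main__":
    env = MountainCarEnv()
    observation = env.reset()
    for _ in range(200):
        # sample a random action: 0 = push left, 1 = no push, 2 = push right
        observation, reward, done, info = env.step(env.action_space.sample())
        if done:
            break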
| xpharry/Udacity-DLFoudation | tutorials/reinforcement/gym/gym/envs/classic_control/mountain_car.py | Python | mit | 4,262 |
import logging
import os
import time
from urllib2 import HTTPError, URLError
from djangoappengine.boot import PROJECT_DIR
from djangoappengine.utils import appid, have_appserver
REMOTE_API_SCRIPTS = (
'$PYTHON_LIB/google/appengine/ext/remote_api/handler.py',
'google.appengine.ext.remote_api.handler.application',
)
def auth_func():
import getpass
return raw_input("Login via Google Account (see note above if login fails): "), getpass.getpass("Password: ")
def rpc_server_factory(*args, **kwargs):
    from google.appengine.tools import appengine_rpc
    kwargs['save_cookies'] = True
    return appengine_rpc.HttpRpcServer(*args, **kwargs)
class StubManager(object):
def __init__(self):
self.testbed = None
self.active_stubs = None
self.pre_test_stubs = None
def setup_stubs(self, connection):
if self.active_stubs is not None:
return
if not have_appserver:
self.activate_stubs(connection)
def activate_stubs(self, connection):
try:
from google.appengine.tools import dev_appserver_main
self.setup_local_stubs(connection)
except ImportError:
self.activate_test_stubs(connection)
def reset_stubs(self, connection, datastore_path=None):
if self.active_stubs == 'test':
self.deactivate_test_stubs()
self.activate_test_stubs(connection, datastore_path)
elif self.active_stubs == 'local':
self.setup_local_stubs(connection)
elif self.active_stubs == 'remote':
self.setup_remote_stubs(connection)
def activate_test_stubs(self, connection, datastore_path=None):
if self.active_stubs == 'test':
return
if self.testbed is None:
from google.appengine.ext.testbed import Testbed
self.testbed = Testbed()
self.testbed.activate()
self.pre_test_stubs = self.active_stubs
self.active_stubs = 'test'
os.environ['APPLICATION_ID'] = 'dev~' + appid
os.environ['HTTP_HOST'] = "%s.appspot.com" % appid
appserver_opts = connection.settings_dict.get('DEV_APPSERVER_OPTIONS', {})
high_replication = appserver_opts.get('high_replication', False)
require_indexes = appserver_opts.get('require_indexes', False)
use_sqlite = appserver_opts.get('use_sqlite', False)
datastore_opts = {'require_indexes': require_indexes, 'use_sqlite': use_sqlite}
if high_replication:
from google.appengine.datastore import datastore_stub_util
datastore_opts['consistency_policy'] = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
self.testbed.init_datastore_v3_stub(datastore_file=datastore_path, **datastore_opts)
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(auto_task_running=True, root_path=PROJECT_DIR)
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
self.testbed.init_xmpp_stub()
self.testbed.init_channel_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_images_stub()
def deactivate_test_stubs(self):
if self.active_stubs == 'test':
self.testbed.deactivate()
self.active_stubs = self.pre_test_stubs
def setup_local_stubs(self, connection):
if self.active_stubs == 'local':
return
from .base import get_datastore_paths
from google.appengine.tools import dev_appserver_main
args = dev_appserver_main.DEFAULT_ARGS.copy()
args.update(get_datastore_paths(connection.settings_dict))
args.update(connection.settings_dict.get('DEV_APPSERVER_OPTIONS', {}))
log_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.WARNING)
try:
from google.appengine.tools import dev_appserver
except ImportError:
from google.appengine.tools import old_dev_appserver as dev_appserver
dev_appserver.SetupStubs('dev~' + appid, **args)
logging.getLogger().setLevel(log_level)
self.active_stubs = 'local'
def setup_remote_stubs(self, connection):
if self.active_stubs == 'remote':
return
if not connection.remote_api_path:
from djangoappengine.utils import appconfig
from google.appengine.api import appinfo
default_module = next(m for m in appconfig.modules if m.module_name == appinfo.DEFAULT_MODULE)
for handler in default_module.handlers:
if handler.script in REMOTE_API_SCRIPTS:
connection.remote_api_path = handler.url.split('(', 1)[0]
break
server = '%s.%s' % (connection.remote_app_id, connection.domain)
remote_url = 'https://%s%s' % (server, connection.remote_api_path)
logging.info("Setting up remote_api for '%s' at %s." %
(connection.remote_app_id, remote_url))
if not have_appserver:
logging.info(
"Connecting to remote_api handler.\n\n"
"IMPORTANT: Check your login method settings in the "
"App Engine Dashboard if you have problems logging in. "
"Login is only supported for Google Accounts.")
from google.appengine.ext.remote_api import remote_api_stub
remote_api_stub.ConfigureRemoteApi(None,
connection.remote_api_path, auth_func, servername=server,
secure=connection.secure_remote_api,
rpc_server_factory=rpc_server_factory)
retry_delay = 1
while retry_delay <= 16:
try:
remote_api_stub.MaybeInvokeAuthentication()
except HTTPError, e:
if not have_appserver:
logging.info("Retrying in %d seconds..." % retry_delay)
time.sleep(retry_delay)
retry_delay *= 2
else:
break
else:
try:
remote_api_stub.MaybeInvokeAuthentication()
except HTTPError, e:
raise URLError("%s\n"
"Couldn't reach remote_api handler at %s.\n"
"Make sure you've deployed your project and "
"installed a remote_api handler in app.yaml. "
"Note that login is only supported for "
"Google Accounts. Make sure you've configured "
"the correct authentication method in the "
"App Engine Dashboard." % (e, remote_url))
logging.info("Now using the remote datastore for '%s' at %s." %
(connection.remote_app_id, remote_url))
self.active_stubs = 'remote'
stub_manager = StubManager()
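# Typical test-suite usage (a sketch; `connection` stands for a djangoappengine
# database connection object, as passed to the methods above):
#
#   stub_manager.activate_test_stubs(connection)
#   try:
#       ...  # exercise code against the datastore/memcache/taskqueue stubs
#   finally:
#       stub_manager.deactivate_test_stubs()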
| jbking/demo-appengine-django-golang | myproject/djangoappengine/db/stubs.py | Python | mit | 7,039 |
# Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
# 52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
# 170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
# 300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
# 411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
# 570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
# 760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
# 890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
# 940, 950, 960, 970, 980, 990)
class mdi:
def __init__(self, emc):
self.clear()
self.emc = emc
self.emcstat = emc.stat()
self.emccommand = emc.command()
self.emcstat.poll()
am = self.emcstat.axis_mask
self.axes = []
self.polar = 0
axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
for i in range(9):
if am & (1<<i):
self.axes.append(axisnames[i])
self.gcode = 'M2'
self.codes = {
'M3' : [_('Spindle CW'), 'S'],
'M4' : [_('Spindle CCW'), 'S'],
'M6' : [_('Tool change'), 'T'],
'M61' : [_('Set tool number'), 'Q'],
'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],
# 'A' means 'the axes'
'G0' : [_('Straight rapid'), 'A'],
'G00' : [_('Straight rapid'), 'A'],
'G1' : [_('Straight feed'), 'A', 'F'],
'G01' : [_('Straight feed'), 'A', 'F'],
'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G4' : [_('Dwell'), 'P'],
'G04' : [_('Dwell'), 'P'],
'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
'G33.1' : [_('Rigid tap'), 'Z', 'K'],
'G38.2' : [_('Probe'), 'A', 'F'],
'G38.3' : [_('Probe'), 'A', 'F'],
'G38.4' : [_('Probe'), 'A', 'F'],
'G38.5' : [_('Probe'), 'A', 'F'],
'G41' : [_('Radius compensation left'), 'D'],
'G42' : [_('Radius compensation right'), 'D'],
'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
'G43' : [_('Tool length offset'), 'H'],
'G43.1' : [_('Tool length offset immediate'), 'I', 'K'],
'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],
'G64' : [_('Continuous mode'), 'P'],
'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G92' : [_('Offset all coordinate systems'), 'A'],
'G96' : [_('CSS Mode'), 'S', 'D'],
}
self.ocodes = []
def add_macros(self, macros):
for m in macros:
words = m.split()
call = "O<%s> call" % words[0]
args = [''] + [w + ' ' for w in words[1:]]
self.ocodes.append(call)
self.codes[call] = args
def get_description(self, gcode):
return self.codes[gcode][0]
def get_words(self, gcode):
self.gcode = gcode
if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
return ['P', 'Q']
if not self.codes.has_key(gcode):
return []
# strip description
words = self.codes[gcode][1:]
# replace A with the real axis names
if 'A' in words:
i = words.index('A')
words = words[:i] + self.axes + words[i+1:]
if self.polar and 'X' in self.axes and 'Y' in self.axes:
words[self.axes.index('X')] = '@'
words[self.axes.index('Y')] = '^'
return words
def clear(self):
self.words = {}
def set_word(self, word, value):
self.words[word] = value
def set_polar(self, p):
self.polar = p;
def issue(self):
m = self.gcode
if m.lower().startswith('o'):
            for code in self.codes[m][1:]:
v = self.words[code] or "0"
m = m + " [%s]" % v
else:
w = [i for i in self.words if len(self.words.get(i)) > 0]
if '@' in w:
m += '@' + self.words.get('@')
w.remove('@')
if '^' in w:
m += '^' + self.words.get('^')
w.remove('^')
for i in w:
if len(self.words.get(i)) > 0:
m += i + self.words.get(i)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
class mdi_control:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.numwords = 1
self.selected = 0
self.gtk = gtk
self.mdi = mdi(emc)
for i in range(self.numlabels):
self.not_editing(i)
self.editing(self.selected)
self.set_text("G")
def not_editing(self, n):
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))
def editing(self, n):
self.not_editing(self.selected)
self.selected = n
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))
def get_text(self):
w = self.labels[self.selected]
return w.get_text()
def set_text(self, t, n = -1):
if n == -1: n = self.selected
w = self.labels[n]
w.set_text(t)
if n > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
self.mdi.set_word(head, tail)
if len(t) < 2:
w.set_alignment(1.0, 0.5)
else:
w.set_alignment(0.0, 0.5)
def clear(self, b):
t = self.get_text()
self.set_text(t.rstrip("0123456789.-"))
def back(self, b):
t = self.get_text()
if t[-1:] in "0123456789.-":
self.set_text(t[:-1])
def fill_out(self):
if self.selected == 0:
w = self.mdi.get_words(self.get_text())
self.numwords = len(w)
for i in range(1,self.numlabels):
if i <= len(w):
self.set_text(w[i-1], i)
else:
self.set_text("", i)
def next(self, b):
self.fill_out();
if self.numwords > 0:
self.editing(max(1,(self.selected+1) % (self.numwords+1)))
def ok(self, b):
self.fill_out();
self.mdi.issue()
def decimal(self, b):
t = self.get_text()
if t.find(".") == -1:
self.set_text(t + ".")
def minus(self, b):
t = self.get_text()
if self.selected > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
if tail.find("-") == -1:
self.set_text(head + "-" + tail)
else:
self.set_text(head + tail[1:])
def keypad(self, b):
t = self.get_text()
num = b.get_name()
self.set_text(t + num)
def gp(self, b):
self.g(b, "G", 1)
def g(self, b, code="G", polar=0):
self.mdi.set_polar(polar)
self.set_text(code, 0)
for i in range(1, self.numlabels):
self.set_text("", i)
self.editing(0)
self.mdi.clear()
def m(self, b):
self.g(b, "M")
def t(self, b):
self.g(b, "T")
def o(self, b):
old_code = self.labels[0].get_text()
ocodes = self.mdi.ocodes
if old_code in ocodes:
j = (ocodes.index(old_code) + 1) % len(ocodes)
else:
j = 0
self.g(b, ocodes[j])
self.next(b)
def select(self, eventbox, event):
n = int(eventbox.get_name()[12:])
if self.selected == 0:
self.fill_out()
if n <= self.numwords:
self.editing(n)
def set_tool(self, tool, g10l11):
self.g(0)
self.set_text("G10", 0)
self.next(0)
if g10l11:
self.set_text("L11", 1)
else:
self.set_text("L10", 1)
self.next(0)
self.set_text("P%d" % tool, 2)
self.next(0)
self.next(0)
self.next(0)
def set_origin(self, system):
self.g(0)
self.set_text("G10", 0)
self.next(0)
self.set_text("L20", 1)
self.next(0)
self.set_text("P%d" % system, 2)
self.next(0)
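# Illustrative sketch (not part of the original file): issuing "G1 X10 F100"
# through the mdi class would look roughly like
#
#   m = mdi(emc)           # emc is the linuxcnc/emc module
#   m.get_words('G1')      # e.g. ['X', 'Y', 'Z', 'F'] for the active axes
#   m.set_word('X', '10')
#   m.set_word('F', '100')
#   m.issue()              # switches to MDI mode and sends e.g. "G1X10F100"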
| CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver | src/emc/usr_intf/touchy/mdi.py | Python | gpl-2.0 | 10,007 |
from funcweb.widget_validation import WidgetSchemaFactory
from funcweb.widget_automation import WidgetListFactory,RemoteFormAutomation,RemoteFormFactory
from func.overlord.client import Overlord, Minions
import socket
import func.utils
class TestClientWidgetRender(object):
minion = None
def test_all_minions(self):
minions =Minions("*").get_all_hosts()
for m in minions:
self.minion = m
self.remote_widget_render()
def remote_widget_render(self):
print "\n******testing minion : %s**********"%(self.minion)
fc = Overlord(self.minion)
modules = fc.system.list_modules()
display_modules={}
print "Getting the modules that has exported arguments"
for module in modules.itervalues():
for mod in module:
#if it is not empty
exported_methods = getattr(fc,mod).get_method_args()[self.minion]
if exported_methods:
print "%s will be rendered"%(mod)
display_modules[mod]=exported_methods
#do the rendering work here
for module,exp_meths in display_modules.iteritems():
for method_name,other_options in exp_meths.iteritems():
minion_arguments = other_options['args']
if minion_arguments:
wlist_object = WidgetListFactory(minion_arguments,minion=self.minion,module=module,method=method_name)
wlist_object = wlist_object.get_widgetlist_object()
#print wlist_object
wf = WidgetSchemaFactory(minion_arguments)
schema_man=wf.get_ready_schema()
minion_form = RemoteFormAutomation(wlist_object,schema_man)
print "%s.%s.%s rendered"%(self.minion,module,method_name)
| dockerera/func | funcweb/funcweb/tests/test_client_rendering.py | Python | gpl-2.0 | 1,853 |
# -*- coding: utf-8 -*-
#
# CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
from Container import Container
from util import props_to_str
ENTRY_HTML = '<%(tag)s id="%(id)s" %(props)s>%(content)s</%(tag)s>'
class ListEntry (Container):
def __init__ (self, _props={}, tag='li'):
Container.__init__ (self)
self.tag = tag
self.props = _props.copy()
def Render (self):
render = Container.Render (self)
if 'id' in self.props:
self.id = self.props['id']
props = {'id': self.id,
'tag': self.tag,
'props': props_to_str(self.props),
'content': render.html}
render.html = ENTRY_HTML %(props)
return render
class List (Container):
"""
Widget for lists of elements. The list can grow dynamically, and
accept any kind of CTK widget as listed element. Arguments are
optional.
Arguments:
_props: dictionary with properties for the HTML element,
such as {'name': 'foo', 'id': 'bar', 'class': 'baz'}
tag: tag to use for the element, either 'ul' for unordered
lists, or 'ol' for ordered lists. By default, 'ul' is
used.
Examples:
lst = CTK.List()
        lst.Add (CTK.RawHTML('One'))
        lst.Add (CTK.Image({'src': '/foo/bar/baz.png'}))
"""
def __init__ (self, _props={}, tag='ul'):
Container.__init__ (self)
self.tag = tag
self.props = _props.copy()
def Add (self, widget, props={}):
assert isinstance(widget, Widget) or widget is None or type(widget) is list
entry = ListEntry (props.copy())
if widget:
if type(widget) == list:
for w in widget:
entry += w
else:
entry += widget
Container.__iadd__ (self, entry)
def __iadd__ (self, widget):
self.Add (widget)
return self
def Render (self):
render = Container.Render (self)
props = {'id': self.id,
'tag': self.tag,
'props': props_to_str(self.props),
'content': render.html}
render.html = ENTRY_HTML %(props)
return render
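# Rough usage sketch (identifiers as in the docstring above; the rendered HTML
# also carries auto-generated id attributes, so the output shown is
# illustrative only):
#
#   lst = List({'class': 'menu'}, tag='ol')
#   lst.Add (RawHTML('One'))
#   lst.Add (RawHTML('Two'), {'class': 'active'})
#   html = lst.Render().html
#   # -> <ol class="menu" ...><li ...>One</li><li class="active" ...>Two</li></ol>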
| youprofit/webserver | admin/CTK/CTK/List.py | Python | gpl-2.0 | 3,058 |
#!/usr/bin/python
# python script to generate an overview of the states based on the input lex file.
#
# Copyright (C) 1997-2019 by Dimitri van Heesch.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation under the terms of the GNU General Public License is hereby
# granted. No representations are made about the suitability of this software
# for any purpose. It is provided "as is" without express or implied warranty.
# See the GNU General Public License for more details.
#
# Documents produced by Doxygen are derivative works derived from the
# input used in their production; they are not affected by this license.
#
import sys
import os
import re
def main():
if len(sys.argv)!=2:
sys.exit('Usage: %s <lex_file>' % sys.argv[0])
lex_file = sys.argv[1]
if (os.path.exists(lex_file)):
#write preamble
print("static const char *stateToString(int state)")
print("{")
print(" switch(state)")
print(" {")
print(" case INITIAL: return \"INITIAL\";")
with open(lex_file) as f:
for line in f:
if re.search(r'^%x', line) or re.search(r'^%s', line):
state = line.split()[1]
print(" case %s: return \"%s\";" % (state,state))
elif re.search(r'^%%', line):
break
else:
pass
f.close()
#write post
print(" }")
print(" return \"Unknown\";")
print("}")
if __name__ == '__main__':
main()
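# Example (sketch): given a lex file declaring states such as
#
#   %x CommentState
#   %s SkipState
#
# the script prints a C function containing one case per state:
#
#   case CommentState: return "CommentState";
#   case SkipState: return "SkipState";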
| ellert/doxygen | src/scan_states.py | Python | gpl-2.0 | 1,591 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_dfs_n.py
# Create Date: 2015-08-16 10:15:54
# Usage: AC_dfs_n.py
# Description:
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param {TreeNode} root
# @return {string[]}
def binaryTreePaths(self, root):
        path = []
paths = []
def dfs(root):
if root:
path.append(str(root.val))
if root.left == None and root.right == None:
paths.append('->'.join(path))
dfs(root.left)
dfs(root.right)
path.pop()
dfs(root)
return paths
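# Worked example (a sketch): for the tree
#       1
#      / \
#     2   3
#      \
#       5
# the depth-first traversal above yields ['1->2->5', '1->3']:
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   root.left.right = TreeNode(5)
#   print Solution().binaryTreePaths(root)  # ['1->2->5', '1->3']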
| yxping/leetcode | solutions/257.Binary_Tree_Paths/AC_dfs_n.py | Python | gpl-2.0 | 836 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.connection_info import ConnectionInformation
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, connection_loader, action_loader
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.debug import debug
__all__ = ['TaskExecutor']
import json
import time
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
def __init__(self, host, task, job_vars, connection_info, loader, module_loader):
self._host = host
self._task = task
self._job_vars = job_vars
self._connection_info = connection_info
self._loader = loader
self._module_loader = module_loader
def run(self):
'''
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with
'''
debug("in run()")
try:
# lookup plugins need to know if this task is executing from
# a role, so that it can properly find files/templates/etc.
roledir = None
if self._task._role:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
res = dict(results=item_results)
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
debug("calling self._execute()")
res = self._execute()
debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
debug("dumping result to json")
result = json.dumps(res)
debug("done dumping result, returning")
return result
except AnsibleError, e:
return dict(failed=True, msg=str(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
items = None
if self._task.loop and self._task.loop in lookup_loader:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)
return items
def _run_loop(self, items):
'''
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
'''
results = []
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
task_vars = self._job_vars.copy()
items = self._squash_items(items, task_vars)
for item in items:
task_vars['item'] = item
try:
tmp_task = self._task.copy()
except AnsibleParserError, e:
results.append(dict(failed=True, msg=str(e)))
continue
# now we swap the internal task with the copy, execute,
# and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
# now update the result with the item info, and append the result
# to the list of results
res['item'] = item
results.append(res)
# FIXME: we should be sending back a callback result for each item in the loop here
print(res)
return results
def _squash_items(self, items, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'):
final_items = []
for item in items:
variables['item'] = item
if self._task.evaluate_conditional(variables):
final_items.append(item)
return [",".join(final_items)]
else:
return items
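    # Illustrative example: for a yum task with items ['httpd', 'mod_ssl']
    # whose conditional passes for both, _squash_items returns
    # ['httpd,mod_ssl'], so the module is invoked once with the combined
    # list rather than once per item.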
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
'''
if variables is None:
variables = self._job_vars
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it
self._connection_info.post_validate(variables=variables, loader=self._loader)
# get the connection and the handler for this execution
self._connection = self._get_connection(variables)
self._handler = self._get_action_handler(connection=self._connection)
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
if not self._task.evaluate_conditional(variables):
debug("when evaulation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
# Now we do final validation on the task, which sets all fields to their final values
self._task.post_validate(variables)
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action == 'include':
include_variables = self._task.args.copy()
include_file = include_variables.get('_raw_params')
del include_variables['_raw_params']
return dict(changed=True, include=include_file, include_variables=include_variables)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))
# Read some values from the task, so that we can modify them if need be
retries = self._task.retries
if retries <= 0:
retries = 1
delay = self._task.delay
if delay < 0:
delay = 1
# make a copy of the job vars here, in case we need to update them
# with the registered variable value later on when testing conditions
vars_copy = variables.copy()
debug("starting attempt loop")
result = None
for attempt in range(retries):
if attempt > 0:
# FIXME: this should use the callback/message passing mechanism
print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt))
result['attempts'] = attempt + 1
debug("running the handler")
result = self._handler.run(task_vars=variables)
debug("handler run complete")
if self._task.async > 0:
# the async_wrapper module returns dumped JSON via its stdout
# response, so we parse it here and replace the result
try:
result = json.loads(result.get('stdout'))
except ValueError, e:
return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
if self._task.poll > 0:
result = self._poll_async_result(result=result)
# update the local copy of vars with the registered value, if specified
if self._task.register:
vars_copy[self._task.register] = result
# create a conditional object to evaluate task conditions
cond = Conditional(loader=self._loader)
# FIXME: make sure until is mutually exclusive with changed_when/failed_when
if self._task.until:
cond.when = self._task.until
if cond.evaluate_conditional(vars_copy):
break
elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
if self._task.changed_when:
cond.when = [ self._task.changed_when ]
result['changed'] = cond.evaluate_conditional(vars_copy)
if self._task.failed_when:
cond.when = [ self._task.failed_when ]
failed_when_result = cond.evaluate_conditional(vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
if failed_when_result:
break
elif 'failed' not in result and result.get('rc', 0) == 0:
# if the result is not failed, stop trying
break
if attempt < retries - 1:
time.sleep(delay)
debug("attempt loop complete, returning result")
return result
def _poll_async_result(self, result):
'''
Polls for the specified JID to be complete
'''
async_jid = result.get('ansible_job_id')
if async_jid is None:
return dict(failed=True, msg="No job id was returned by the async task")
        # Create a new pseudo-task to run the async_status module, and run
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
async_task = Task().load(dict(action='async_status jid=%s' % async_jid))
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
normal_handler = action_loader.get(
'normal',
task=async_task,
connection=self._connection,
connection_info=self._connection_info,
loader=self._loader,
module_loader=self._module_loader,
)
time_left = self._task.async
while time_left > 0:
time.sleep(self._task.poll)
async_result = normal_handler.run()
if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
break
time_left -= self._task.poll
if int(async_result.get('finished', 0)) != 1:
return dict(failed=True, msg="async task did not complete within the requested time")
else:
return async_result
def _get_connection(self, variables):
'''
Reads the connection property for the host, and returns the
correct connection object from the list of connection plugins
'''
# FIXME: delegate_to calculation should be done here
# FIXME: calculation of connection params/auth stuff should be done here
self._connection_info.remote_addr = self._host.ipv4_address
if self._task.delegate_to is not None:
self._compute_delegate(variables)
# FIXME: add all port/connection type munging here (accelerated mode,
# fixing up options for ssh, etc.)? and 'smart' conversion
conn_type = self._connection_info.connection
if conn_type == 'smart':
conn_type = 'ssh'
connection = connection_loader.get(conn_type, self._connection_info)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
connection.connect()
return connection
def _get_action_handler(self, connection):
'''
        Returns the correct action plugin to handle the requested task action
'''
if self._task.action in action_loader:
if self._task.async != 0:
raise AnsibleError("async mode is not supported with the %s module" % module_name)
handler_name = self._task.action
elif self._task.async == 0:
handler_name = 'normal'
else:
handler_name = 'async'
handler = action_loader.get(
handler_name,
task=self._task,
connection=connection,
connection_info=self._connection_info,
loader=self._loader,
module_loader=self._module_loader,
)
if not handler:
raise AnsibleError("the handler '%s' was not found" % handler_name)
return handler
def _compute_delegate(self, variables):
# get the vars for the delegate by its name
try:
this_info = variables['hostvars'][self._task.delegate_to]
except:
# make sure the inject is empty for non-inventory hosts
this_info = {}
# get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
#self._connection_info.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
self._connection_info.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
self._connection_info.port = this_info.get('ansible_ssh_port', self._connection_info.port)
self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password)
self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file)
self._connection_info.connection = this_info.get('ansible_connection', self._connection_info.connection)
self._connection_info.sudo_pass = this_info.get('ansible_sudo_pass', self._connection_info.sudo_pass)
if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'):
self._connection_info.connection = 'local'
# Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory
#if delegate['private_key_file'] is None:
# delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
#if delegate['private_key_file'] is not None:
# delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
for i in this_info:
if i.startswith("ansible_") and i.endswith("_interpreter"):
variables[i] = this_info[i]
| a13m/ansible | v2/ansible/executor/task_executor.py | Python | gpl-3.0 | 16,471 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import locale
from collections import OrderedDict
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from pootle.core.cache import make_method_key
from pootle.core.mixins import TreeItem
from pootle.core.url_helpers import get_editor_filter
from pootle.i18n.gettext import tr_lang, language_dir
class LanguageManager(models.Manager):
def get_queryset(self):
"""Mimics `select_related(depth=1)` behavior. Pending review."""
return (
super(LanguageManager, self).get_queryset().select_related(
'directory',
)
)
class LiveLanguageManager(models.Manager):
"""Manager that only considers `live` languages.
A live language is any language containing at least a project with
translatable files.
"""
def get_queryset(self):
return super(LiveLanguageManager, self).get_queryset().filter(
translationproject__isnull=False,
project__isnull=True,
).distinct()
def cached_dict(self, locale_code='en-us'):
"""Retrieves a sorted list of live language codes and names.
:param locale_code: the UI locale for which language full names need to
be localized.
:return: an `OrderedDict`
"""
key = make_method_key(self, 'cached_dict', locale_code)
languages = cache.get(key, None)
if languages is None:
languages = OrderedDict(
sorted([(lang[0], tr_lang(lang[1]))
for lang in self.values_list('code', 'fullname')],
cmp=locale.strcoll,
key=lambda x: x[1])
)
cache.set(key, languages, settings.POOTLE_CACHE_TIMEOUT)
return languages
class Language(models.Model, TreeItem):
code_help_text = _('ISO 639 language code for the language, possibly '
'followed by an underscore (_) and an ISO 3166 country code. '
'<a href="http://www.w3.org/International/articles/language-tags/">'
'More information</a>')
code = models.CharField(max_length=50, null=False, unique=True,
db_index=True, verbose_name=_("Code"), help_text=code_help_text)
fullname = models.CharField(max_length=255, null=False,
verbose_name=_("Full Name"))
specialchars_help_text = _('Enter any special characters that users '
'might find difficult to type')
specialchars = models.CharField(max_length=255, blank=True,
verbose_name=_("Special Characters"),
help_text=specialchars_help_text)
plurals_help_text = _('For more information, visit '
'<a href="http://docs.translatehouse.org/projects/'
'localization-guide/en/latest/l10n/pluralforms.html">'
'our page</a> on plural forms.')
nplural_choices = (
(0, _('Unknown')), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)
)
nplurals = models.SmallIntegerField(default=0, choices=nplural_choices,
verbose_name=_("Number of Plurals"), help_text=plurals_help_text)
pluralequation = models.CharField(max_length=255, blank=True,
verbose_name=_("Plural Equation"), help_text=plurals_help_text)
directory = models.OneToOneField('pootle_app.Directory', db_index=True,
editable=False)
objects = LanguageManager()
live = LiveLanguageManager()
class Meta:
ordering = ['code']
db_table = 'pootle_app_language'
############################ Properties ###################################
@property
def pootle_path(self):
return '/%s/' % self.code
@property
def name(self):
"""Localized fullname for the language."""
return tr_lang(self.fullname)
############################ Methods ######################################
@property
def direction(self):
"""Return the language direction."""
return language_dir(self.code)
def __unicode__(self):
return u"%s - %s" % (self.name, self.code)
def __init__(self, *args, **kwargs):
super(Language, self).__init__(*args, **kwargs)
def __repr__(self):
return u'<%s: %s>' % (self.__class__.__name__, self.fullname)
def save(self, *args, **kwargs):
# create corresponding directory object
from pootle_app.models.directory import Directory
self.directory = Directory.objects.root.get_or_make_subdir(self.code)
super(Language, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
directory = self.directory
super(Language, self).delete(*args, **kwargs)
directory.delete()
def get_absolute_url(self):
return reverse('pootle-language-browse', args=[self.code])
def get_translate_url(self, **kwargs):
return u''.join([
reverse('pootle-language-translate', args=[self.code]),
get_editor_filter(**kwargs),
])
def clean(self):
super(Language, self).clean()
if self.fullname:
self.fullname = self.fullname.strip()
### TreeItem
def get_children(self):
return self.translationproject_set.live()
def get_cachekey(self):
return self.directory.pootle_path
### /TreeItem
@receiver([post_delete, post_save])
def invalidate_language_list_cache(sender, instance, **kwargs):
# XXX: maybe use custom signals or simple function calls?
if instance.__class__.__name__ not in ['Language', 'TranslationProject']:
return
key = make_method_key('LiveLanguageManager', 'cached_dict', '*')
cache.delete_pattern(key)
| Avira/pootle | pootle/apps/pootle_language/models.py | Python | gpl-3.0 | 6,196 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from six import iteritems
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
fake_loader = DictDataLoader({})
v = VariableManager()
vars = v.get_vars(loader=fake_loader, use_cache=False)
if 'omit' in vars:
del vars['omit']
if 'vars' in vars:
del vars['vars']
if 'ansible_version' in vars:
del vars['ansible_version']
self.assertEqual(vars, dict(playbook_dir='.'))
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.extra_vars = extra_vars
vars = v.get_vars(loader=fake_loader, use_cache=False)
for (key, val) in iteritems(extra_vars):
self.assertEqual(vars.get(key), val)
self.assertIsNot(v.extra_vars, extra_vars)
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
""",
"other_path/host_vars/hostname1.yml": """
foo: bam
baa: bat
""",
})
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
v.add_host_vars_file("other_path/host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], [dict(foo="bar"), dict(foo="bam", baa="bat")])
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
mock_host.get_group_vars.return_value = dict()
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bam")
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("baa"), "bat")
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/all.yml": """
foo: bar
""",
"group_vars/somegroup.yml": """
bam: baz
""",
"other_path/group_vars/somegroup.yml": """
baa: bat
"""
})
v = VariableManager()
v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
v.add_group_vars_file("other_path/group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["all"], [dict(foo="bar")])
self.assertEqual(v._group_vars_files["somegroup"], [dict(bam="baz"), dict(baa="bat")])
mock_group = MagicMock()
mock_group.name = "somegroup"
mock_group.get_ancestors.return_value = ()
mock_group.get_vars.return_value = dict()
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = (mock_group,)
mock_host.get_group_vars.return_value = dict()
vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
self.assertEqual(vars.get("foo"), "bar")
self.assertEqual(vars.get("baa"), "bat")
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})
mock_task = MagicMock()
mock_task._role = None
mock_task.get_vars.return_value = dict(foo="bar")
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
def test_variable_manager_precedence(self):
'''
Tests complex variations and combinations of get_vars() with different
objects to modify the context under which variables are merged.
'''
v = VariableManager()
v._fact_cache = defaultdict(dict)
fake_loader = DictDataLoader({
# inventory1
'/etc/ansible/inventory1': """
[group2:children]
group1
[group1]
host1 host_var=host_var_from_inventory_host1
[group1:vars]
group_var = group_var_from_inventory_group1
[group2:vars]
group_var = group_var_from_inventory_group2
""",
# role defaults_only1
'/etc/ansible/roles/defaults_only1/defaults/main.yml': """
default_var: "default_var_from_defaults_only1"
host_var: "host_var_from_defaults_only1"
group_var: "group_var_from_defaults_only1"
group_var_all: "group_var_all_from_defaults_only1"
extra_var: "extra_var_from_defaults_only1"
""",
'/etc/ansible/roles/defaults_only1/tasks/main.yml': """
- debug: msg="here i am"
""",
# role defaults_only2
'/etc/ansible/roles/defaults_only2/defaults/main.yml': """
default_var: "default_var_from_defaults_only2"
host_var: "host_var_from_defaults_only2"
group_var: "group_var_from_defaults_only2"
group_var_all: "group_var_all_from_defaults_only2"
extra_var: "extra_var_from_defaults_only2"
""",
})
inv1 = Inventory(loader=fake_loader, variable_manager=v, host_list='/etc/ansible/inventory1')
inv1.set_playbook_basedir('./')
play1 = Play.load(dict(
hosts=['all'],
roles=['defaults_only1', 'defaults_only2'],
), loader=fake_loader, variable_manager=v)
# first we assert that the defaults as viewed as a whole are the merged results
# of the defaults from each role, with the last role defined "winning" when
# there is a variable naming conflict
res = v.get_vars(loader=fake_loader, play=play1)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
# next, we assert that when vars are viewed from the context of a task within a
# role, that task will see its own role defaults before any other role's
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
        # next we assert the precedence of inventory variables
v.set_inventory(inv1)
h1 = inv1.get_host('host1')
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
# next we test with group_vars/ files loaded
fake_loader.push("/etc/ansible/group_vars/all", """
group_var_all: group_var_all_from_group_vars_all
""")
fake_loader.push("/etc/ansible/group_vars/group1", """
group_var: group_var_from_group_vars_group1
""")
fake_loader.push("/etc/ansible/group_vars/group3", """
# this is a dummy, which should not be used anywhere
group_var: group_var_from_group_vars_group3
""")
fake_loader.push("/etc/ansible/host_vars/host1", """
host_var: host_var_from_host_vars_host1
""")
v.add_group_vars_file("/etc/ansible/group_vars/all", loader=fake_loader)
v.add_group_vars_file("/etc/ansible/group_vars/group1", loader=fake_loader)
v.add_group_vars_file("/etc/ansible/group_vars/group2", loader=fake_loader)
v.add_host_vars_file("/etc/ansible/host_vars/host1", loader=fake_loader)
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
# add in the fact cache
v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')
| rasata/ansible | test/units/vars/test_variable_manager.py | Python | gpl-3.0 | 10,563 |
from sympy import symbols, Dij, LeviCivita
x, y = symbols('x,y')
def test_Dij():
assert Dij(1, 1) == 1
assert Dij(1, 2) == 0
assert Dij(x, x) == 1
assert Dij(x**2-y**2, x**2-y**2) == 1
def test_levicivita():
assert LeviCivita(1, 2, 3) == 1
assert LeviCivita(1, 3, 2) == -1
assert LeviCivita(1, 2, 2) == 0
i,j,k = symbols('i j k')
assert LeviCivita(i, j, k) == LeviCivita(i,j,k, evaluate=False)
assert LeviCivita(i, j, i) == 0
assert LeviCivita(1, i, i) == 0
assert LeviCivita(i, j, k).doit() == (j - i)*(k - i)*(k - j)/2
assert LeviCivita(1, 2, 3, 1) == 0
assert LeviCivita(4, 5, 1, 2, 3) == 1
assert LeviCivita(4, 5, 2, 1, 3) == -1
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/functions/special/tests/test_tensor_functions.py | Python | agpl-3.0 | 698 |
from sympy import Function, symbols, S, sqrt, rf, factorial
from sympy.solvers.recurr import rsolve, rsolve_poly, rsolve_ratio, rsolve_hyper
y = Function('y')
n, k = symbols('n,k', integer=True)
C0, C1, C2 = symbols('C0,C1,C2')
def test_rsolve_poly():
assert rsolve_poly([-1, -1, 1], 0, n) == 0
assert rsolve_poly([-1, -1, 1], 1, n) == -1
assert rsolve_poly([-1, n+1], n, n) == 1
assert rsolve_poly([-1, 1], n, n) == C0 + (n**2 - n)/2
assert rsolve_poly([-n-1, n], 1, n) == C1*n - 1
assert rsolve_poly([-4*n-2, 1], 4*n+1, n) == -1
assert rsolve_poly([-1, 1], n**5 + n**3, n) == C0 - n**3 / 2 - n**5 / 2 + n**2 / 6 + n**6 / 6 + 2*n**4 / 3
def test_rsolve_ratio():
solution = rsolve_ratio([-2*n**3+n**2+2*n-1, 2*n**3+n**2-6*n,
-2*n**3-11*n**2-18*n-9, 2*n**3+13*n**2+22*n+8], 0, n)
assert solution in [
C1*((-2*n + 3)/(n**2 - 1))/3,
(S(1)/2)*(C1*(-3 + 2*n)/(-1 + n**2)),
(S(1)/2)*(C1*( 3 - 2*n)/( 1 - n**2)),
(S(1)/2)*(C2*(-3 + 2*n)/(-1 + n**2)),
(S(1)/2)*(C2*( 3 - 2*n)/( 1 - n**2)),
]
def test_rsolve_hyper():
assert rsolve_hyper([-1, -1, 1], 0, n) in [
C0*(S.Half - S.Half*sqrt(5))**n + C1*(S.Half + S.Half*sqrt(5))**n,
C1*(S.Half - S.Half*sqrt(5))**n + C0*(S.Half + S.Half*sqrt(5))**n,
]
assert rsolve_hyper([n**2-2, -2*n-1, 1], 0, n) in [
C0*rf(sqrt(2), n) + C1*rf(-sqrt(2), n),
C1*rf(sqrt(2), n) + C0*rf(-sqrt(2), n),
]
assert rsolve_hyper([n**2-k, -2*n-1, 1], 0, n) in [
C0*rf(sqrt(k), n) + C1*rf(-sqrt(k), n),
C1*rf(sqrt(k), n) + C0*rf(-sqrt(k), n),
]
assert rsolve_hyper([2*n*(n+1), -n**2-3*n+2, n-1], 0, n) == C0*factorial(n) + C1*2**n
assert rsolve_hyper([n + 2, -(2*n + 3)*(17*n**2 + 51*n + 39), n + 1], 0, n) == 0
assert rsolve_hyper([-n-1, -1, 1], 0, n) == 0
assert rsolve_hyper([-1, 1], n, n).expand() == C0 + n**2/2 - n/2
assert rsolve_hyper([-1, 1], 1+n, n).expand() == C0 + n**2/2 + n/2
assert rsolve_hyper([-1, 1], 3*(n+n**2), n).expand() == C0 + n**3 - n
def recurrence_term(c, f):
"""Compute RHS of recurrence in f(n) with coefficients in c."""
return sum(c[i]*f.subs(n, n+i) for i in range(len(c)))
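# Hedged sketch (added for illustration, not part of the original test
# suite): expanding recurrence_term by hand for a small case.
def _example_recurrence_term():
    assert recurrence_term([-2, 1], n**2) == -2*n**2 + (n + 1)**2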
def rsolve_bulk_checker(solver, c, q, p):
"""Used by test_rsolve_bulk."""
pp = solver(c, q, n)
assert pp == p
def test_rsolve_bulk():
"""Some bulk-generated tests."""
funcs = [ n, n+1, n**2, n**3, n**4, n+n**2, 27*n + 52*n**2 - 3*n**3 + 12*n**4 - 52*n**5 ]
coeffs = [ [-2, 1], [-2, -1, 1], [-1, 1, 1, -1, 1], [-n, 1], [n**2-n+12, 1] ]
for p in funcs:
# compute difference
for c in coeffs:
q = recurrence_term(c, p)
if p.is_polynomial(n):
yield rsolve_bulk_checker, rsolve_poly, c, q, p
#if p.is_hypergeometric(n):
# yield rsolve_bulk_checker, rsolve_hyper, c, q, p
def test_rsolve():
f = y(n+2) - y(n+1) - y(n)
h = sqrt(5)*(S.Half + S.Half*sqrt(5))**n \
- sqrt(5)*(S.Half - S.Half*sqrt(5))**n
assert rsolve(f, y(n)) in [
C0*(S.Half - S.Half*sqrt(5))**n + C1*(S.Half + S.Half*sqrt(5))**n,
C1*(S.Half - S.Half*sqrt(5))**n + C0*(S.Half + S.Half*sqrt(5))**n,
]
assert rsolve(f, y(n), [ 0, 5 ]) == h
assert rsolve(f, y(n), { 0 :0, 1 :5 }) == h
assert rsolve(f, y(n), { y(0):0, y(1):5 }) == h
f = (n-1)*y(n+2) - (n**2+3*n-2)*y(n+1) + 2*n*(n+1)*y(n)
g = C0*factorial(n) + C1*2**n
h = -3*factorial(n) + 3*2**n
assert rsolve(f, y(n)) == g
assert rsolve(f, y(n), [ 0, 3 ]) == h
assert rsolve(f, y(n), { 0 :0, 1 :3 }) == h
assert rsolve(f, y(n), { y(0):0, y(1):3 }) == h
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/solvers/tests/test_recurr.py | Python | agpl-3.0 | 3,721 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi Store',
'version': '8.0.1.0.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Multi Store
===========
The main purpose of this module is to restrict journal access for users on different stores.
This module adds a new concept, "stores", which is in some ways similar to multicompany.
Similar to multicompany:
* User can have multiple stores available (store_ids)
* User can be active in only one store (store_id), which can be set in his own preferences
* There is a group "multi store" that gives users the ability to see multi store fields
This module also adds a store_id field on journal:
* If store_id = False then the journal can be seen by everyone
* If store_id is set, then the journal can be seen by users on that store and parent stores
It also restricts editing, creation and unlinking on: account.move, account.invoice and account.voucher.
This is done with the same logic as for journals. We do not limit "read" access on these models because users may need to access those documents, for example, to see a partner's due amounts.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'account_voucher',
],
'data': [
'views/res_store_view.xml',
'views/res_users_view.xml',
'views/account_view.xml',
'security/multi_store_security.xml',
'security/ir.model.access.csv',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | sysadminmatmoz/ingadhoc | multi_store/__openerp__.py | Python | agpl-3.0 | 2,617 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLrudict(PythonPackage):
""" A fast LRU cache"""
homepage = "https://github.com/amitdev/lru-dict"
url = "https://pypi.io/packages/source/l/lru-dict/lru-dict-1.1.6.tar.gz"
version('1.1.6', 'b33f54f1257ab541f4df4bacc7509f5a')
depends_on('python@2.7:')
depends_on('py-setuptools', type=('build'))
| krafczyk/spack | var/spack/repos/builtin/packages/py-lrudict/package.py | Python | lgpl-2.1 | 1,591 |
{
'name' : 'Product status at website shop',
'version' : '1.0.1',
'author' : 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category' : 'Sale',
'website' : 'https://yelizariev.github.io',
'depends' : ['website_sale', 'stock'],
'data':[
'website_sale_stock_status_views.xml',
'website_sale_stock_status_data.xml',
],
'installable': True
}
| Endika/website-addons | website_sale_stock_status/__openerp__.py | Python | lgpl-3.0 | 408 |
# -*- coding: utf-8 -*-
import functools
import httplib as http
import logging
import time
import bleach
from django.db.models import Q
from flask import request
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from framework import sentry
from website import language
from osf.models import OSFUser, AbstractNode
from website import settings
from website.project.views.contributor import get_node_contributors_abbrev
from website.ember_osf_web.decorators import ember_flag_is_active
from website.search import exceptions
import website.search.search as search
from website.search.util import build_query
logger = logging.getLogger(__name__)
RESULTS_PER_PAGE = 250
def handle_search_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions.MalformedQueryError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Bad search query',
'message_long': language.SEARCH_QUERY_HELP,
})
except exceptions.SearchUnavailableError:
raise HTTPError(http.SERVICE_UNAVAILABLE, data={
'message_short': 'Search unavailable',
'message_long': ('Our search service is currently unavailable, if the issue persists, '
+ language.SUPPORT_LINK),
})
except exceptions.SearchException:
# Interim fix for issue where ES fails with 500 in some settings- ensure exception is still logged until it can be better debugged. See OSF-4538
sentry.log_exception()
sentry.log_message('Elasticsearch returned an unexpected error response')
# TODO: Add a test; may need to mock out the error response due to inability to reproduce error code locally
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Could not perform search query',
'message_long': language.SEARCH_QUERY_HELP,
})
return wrapped
@handle_search_errors
def search_search(**kwargs):
_type = kwargs.get('type', None)
tick = time.time()
results = {}
if request.method == 'POST':
results = search.search(request.get_json(), doc_type=_type)
elif request.method == 'GET':
q = request.args.get('q', '*')
# TODO Match javascript params?
start = request.args.get('from', '0')
size = request.args.get('size', '10')
results = search.search(build_query(q, start, size), doc_type=_type)
results['time'] = round(time.time() - tick, 2)
return results
@ember_flag_is_active('ember_search_page')
def search_view():
    return {'shareUrl': settings.SHARE_URL}
def conditionally_add_query_item(query, item, condition, value):
""" Helper for the search_projects_by_title function which will add a condition to a query
It will give an error if the proper search term is not used.
:param query: The modular ODM query that you want to modify
:param item: the field to query on
:param condition: yes, no, or either
:return: the modified query
"""
condition = condition.lower()
if condition == 'yes':
return query & Q(**{item: value})
elif condition == 'no':
return query & ~Q(**{item: value})
elif condition == 'either':
return query
raise HTTPError(http.BAD_REQUEST)
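# Hedged sketch (added for illustration, not part of the original view code):
# how the yes/no/either convention composes Django Q objects.
def _example_conditionally_add_query_item():
    base = Q(title__icontains='demo')
    # 'no' negates the condition, so deleted nodes are excluded from the match.
    return conditionally_add_query_item(base, 'is_deleted', 'no', True)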
@must_be_logged_in
def search_projects_by_title(**kwargs):
""" Search for nodes by title. Can pass in arguments from the URL to modify the search
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
# TODO(fabianvf): At some point, it would be nice to do this with elastic search
user = kwargs['auth'].user
term = request.args.get('term', '')
max_results = int(request.args.get('maxResults', '10'))
category = request.args.get('category', 'project').lower()
is_deleted = request.args.get('isDeleted', 'no').lower()
is_collection = request.args.get('isFolder', 'no').lower()
is_registration = request.args.get('isRegistration', 'no').lower()
include_public = request.args.get('includePublic', 'yes').lower()
include_contributed = request.args.get('includeContributed', 'yes').lower()
    ignore_nodes = request.args.getlist('ignoreNode')
matching_title = Q(
title__icontains=term, # search term (case insensitive)
category=category # is a project
)
matching_title = conditionally_add_query_item(matching_title, 'is_deleted', is_deleted, True)
matching_title = conditionally_add_query_item(matching_title, 'type', is_registration, 'osf.registration')
matching_title = conditionally_add_query_item(matching_title, 'type', is_collection, 'osf.collection')
if len(ignore_nodes) > 0:
for node_id in ignore_nodes:
matching_title = matching_title & ~Q(_id=node_id)
my_projects = []
my_project_count = 0
public_projects = []
if include_contributed == 'yes':
my_projects = AbstractNode.objects.filter(
matching_title &
Q(_contributors=user) # user is a contributor
)[:max_results]
        my_project_count = len(my_projects)
if my_project_count < max_results and include_public == 'yes':
public_projects = AbstractNode.objects.filter(
matching_title &
Q(is_public=True) # is public
)[:max_results - my_project_count]
results = list(my_projects) + list(public_projects)
ret = process_project_search_results(results, **kwargs)
return ret
@must_be_logged_in
def process_project_search_results(results, **kwargs):
"""
:param results: list of projects from the modular ODM search
:return: we return the entire search result, which is a list of
dictionaries. This includes the list of contributors.
"""
user = kwargs['auth'].user
ret = []
for project in results:
authors = get_node_contributors_abbrev(project=project, auth=kwargs['auth'])
authors_html = ''
for author in authors['contributors']:
a = OSFUser.load(author['user_id'])
authors_html += '<a href="%s">%s</a>' % (a.url, a.fullname)
authors_html += author['separator'] + ' '
authors_html += ' ' + authors['others_count']
ret.append({
'id': project._id,
'label': project.title,
'value': project.title,
'category': 'My Projects' if user in project.contributors else 'Public Projects',
'authors': authors_html,
})
return ret
@collect_auth
def search_contributor(auth):
user = auth.user if auth else None
nid = request.args.get('excludeNode')
exclude = AbstractNode.load(nid).contributors if nid else []
# TODO: Determine whether bleach is appropriate for ES payload. Also, inconsistent with website.sanitize.util.strip_html
query = bleach.clean(request.args.get('query', ''), tags=[], strip=True)
page = int(bleach.clean(request.args.get('page', '0'), tags=[], strip=True))
size = int(bleach.clean(request.args.get('size', '5'), tags=[], strip=True))
return search.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=user)
| erinspace/osf.io | website/search/views.py | Python | apache-2.0 | 8,117 |
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: More tests
import socket
import unittest
from StringIO import StringIO
from urlparse import urlparse
# TODO: mock http connection class with more control over headers
from test.unit.proxy.test_server import fake_http_connect
from swift.common import client as c
class TestHttpHelpers(unittest.TestCase):
def test_quote(self):
value = 'standard string'
self.assertEquals('standard%20string', c.quote(value))
value = u'\u0075nicode string'
self.assertEquals('unicode%20string', c.quote(value))
def test_http_connection(self):
url = 'http://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPConnection))
url = 'https://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPSConnection))
url = 'ftp://www.test.com'
self.assertRaises(c.ClientException, c.http_connection, url)
class TestClientException(unittest.TestCase):
def test_is_exception(self):
self.assertTrue(issubclass(c.ClientException, Exception))
def test_format(self):
exc = c.ClientException('something failed')
self.assertTrue('something failed' in str(exc))
test_kwargs = (
'scheme',
'host',
'port',
'path',
'query',
'status',
'reason',
'device',
)
for value in test_kwargs:
kwargs = {
'http_%s' % value: value,
}
exc = c.ClientException('test', **kwargs)
self.assertTrue(value in str(exc))
class TestJsonImport(unittest.TestCase):
def tearDown(self):
try:
import json
except ImportError:
pass
else:
reload(json)
try:
import simplejson
except ImportError:
pass
else:
reload(simplejson)
def test_any(self):
self.assertTrue(hasattr(c, 'json_loads'))
def test_no_simplejson(self):
# break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
reload(c)
try:
from json import loads
except ImportError:
            # this case is tested in test_no_json
pass
else:
self.assertEquals(loads, c.json_loads)
def test_no_json(self):
# first break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
# then break json
try:
import json
except ImportError:
# not installed, so we don't have to break it for these tests
_orig_dumps = None
else:
# before we break json, grab a copy of the orig_dumps function
_orig_dumps = json.dumps
delattr(json, 'loads')
reload(c)
if _orig_dumps:
# basic test of swift.common.client.json_loads using json.loads
data = {
'string': 'value',
'int': 0,
'bool': True,
'none': None,
}
json_string = _orig_dumps(data)
else:
# even more basic test using a hand encoded json string
data = ['value1', 'value2']
json_string = "['value1', 'value2']"
self.assertEquals(data, c.json_loads(json_string))
self.assertRaises(AttributeError, c.json_loads, self)
class MockHttpTest(unittest.TestCase):
def setUp(self):
def fake_http_connection(*args, **kwargs):
_orig_http_connection = c.http_connection
def wrapper(url, proxy=None):
parsed, _conn = _orig_http_connection(url, proxy=proxy)
conn = fake_http_connect(*args, **kwargs)()
def request(*args, **kwargs):
return
conn.request = request
conn.has_been_read = False
_orig_read = conn.read
def read(*args, **kwargs):
conn.has_been_read = True
return _orig_read(*args, **kwargs)
conn.read = read
return parsed, conn
return wrapper
self.fake_http_connection = fake_http_connection
def tearDown(self):
reload(c)
# TODO: following tests are placeholders, need more tests, better coverage
class TestGetAuth(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(url, None)
self.assertEquals(token, None)
class TestGetAccount(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_account('http://www.test.com', 'asdf')[1]
self.assertEquals(value, [])
class TestHeadAccount(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.head_account('http://www.tests.com', 'asdf')
# TODO: Hmm. This doesn't really test too much as it uses a fake that
# always returns the same dict. I guess it "exercises" the code, so
# I'll leave it for now.
self.assertEquals(type(value), dict)
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_account,
'http://www.tests.com', 'asdf')
class TestGetContainer(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_container('http://www.test.com', 'asdf', 'asdf')[1]
self.assertEquals(value, [])
class TestHeadContainer(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_container,
'http://www.test.com', 'asdf', 'asdf',
)
class TestPutContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.put_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestDeleteContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.delete_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestGetObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.get_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestHeadObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestPutObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
value = c.put_object(*args)
self.assertTrue(isinstance(value, basestring))
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
self.assertRaises(c.ClientException, c.put_object, *args)
class TestPostObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', {})
value = c.post_object(*args)
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.post_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf', {})
class TestDeleteObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.delete_object('http://www.test.com', 'asdf', 'asdf', 'asdf')
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.delete_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestConnection(MockHttpTest):
def test_instance(self):
conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(conn.retries, 5)
def test_retry(self):
c.http_connection = self.fake_http_connection(500)
def quick_sleep(*args):
pass
c.sleep = quick_sleep
conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
self.assertRaises(c.ClientException, conn.head_account)
self.assertEquals(conn.attempts, conn.retries + 1)
def test_resp_read_on_server_error(self):
c.http_connection = self.fake_http_connection(500)
conn = c.Connection('http://www.test.com', 'asdf', 'asdf', retries=0)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
conn.get_auth = get_auth
self.url, self.token = conn.get_auth()
method_signatures = (
(conn.head_account, []),
(conn.get_account, []),
(conn.head_container, ('asdf',)),
(conn.get_container, ('asdf',)),
(conn.put_container, ('asdf',)),
(conn.delete_container, ('asdf',)),
(conn.head_object, ('asdf', 'asdf')),
(conn.get_object, ('asdf', 'asdf')),
(conn.put_object, ('asdf', 'asdf', 'asdf')),
(conn.post_object, ('asdf', 'asdf', {})),
(conn.delete_object, ('asdf', 'asdf')),
)
for method, args in method_signatures:
self.assertRaises(c.ClientException, method, *args)
try:
self.assertTrue(conn.http_conn[1].has_been_read)
except AssertionError:
msg = '%s did not read resp on server error' % method.__name__
self.fail(msg)
except Exception, e:
raise e.__class__("%s - %s" % (method.__name__, e))
def test_reauth(self):
c.http_connection = self.fake_http_connection(401)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
def swap_sleep(*args):
self.swap_sleep_called = True
c.get_auth = get_auth
c.http_connection = self.fake_http_connection(200)
c.sleep = swap_sleep
self.swap_sleep_called = False
conn = c.Connection('http://www.test.com', 'asdf', 'asdf',
preauthurl='http://www.old.com',
preauthtoken='old',
)
self.assertEquals(conn.attempts, 0)
self.assertEquals(conn.url, 'http://www.old.com')
self.assertEquals(conn.token, 'old')
value = conn.head_account()
self.assertTrue(self.swap_sleep_called)
self.assertEquals(conn.attempts, 2)
self.assertEquals(conn.url, 'http://www.new.com')
self.assertEquals(conn.token, 'new')
def test_reset_stream(self):
class LocalContents(object):
def __init__(self, tell_value=0):
self.already_read = False
self.seeks = []
self.tell_value = tell_value
def tell(self):
return self.tell_value
def seek(self, position):
self.seeks.append(position)
self.already_read = False
def read(self, size=-1):
if self.already_read:
return ''
else:
self.already_read = True
return 'abcdef'
class LocalConnection(object):
def putrequest(self, *args, **kwargs):
return
def putheader(self, *args, **kwargs):
return
def endheaders(self, *args, **kwargs):
return
def send(self, *args, **kwargs):
raise socket.error('oops')
def request(self, *args, **kwargs):
return
def getresponse(self, *args, **kwargs):
self.status = 200
return self
def getheader(self, *args, **kwargs):
return ''
def read(self, *args, **kwargs):
return ''
def local_http_connection(url, proxy=None):
parsed = urlparse(url)
return parsed, LocalConnection()
orig_conn = c.http_connection
try:
c.http_connection = local_http_connection
conn = c.Connection('http://www.example.com', 'asdf', 'asdf',
retries=1, starting_backoff=.0001)
contents = LocalContents()
exc = None
try:
conn.put_object('c', 'o', contents)
except socket.error, err:
exc = err
self.assertEquals(contents.seeks, [0])
self.assertEquals(str(exc), 'oops')
contents = LocalContents(tell_value=123)
exc = None
try:
conn.put_object('c', 'o', contents)
except socket.error, err:
exc = err
self.assertEquals(contents.seeks, [123])
self.assertEquals(str(exc), 'oops')
contents = LocalContents()
contents.tell = None
exc = None
try:
conn.put_object('c', 'o', contents)
except c.ClientException, err:
exc = err
self.assertEquals(contents.seeks, [])
self.assertEquals(str(exc), "put_object('c', 'o', ...) failure "
"and no ability to reset contents for reupload.")
finally:
c.http_connection = orig_conn
if __name__ == '__main__':
unittest.main()
| Intel-bigdata/swift | test/unit/common/test_client.py | Python | apache-2.0 | 15,155 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Base64Encode
# Returns the specified text or file as a Base64 encoded string.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Base64Encode(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Base64Encode Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Base64Encode, self).__init__(temboo_session, '/Library/Utilities/Encoding/Base64Encode')
def new_input_set(self):
return Base64EncodeInputSet()
def _make_result_set(self, result, path):
return Base64EncodeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return Base64EncodeChoreographyExecution(session, exec_id, path)
class Base64EncodeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Base64Encode
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((conditional, string) The text that should be Base64 encoded. Required unless providing a value for the URL input.)
"""
super(Base64EncodeInputSet, self)._set_input('Text', value)
def set_URL(self, value):
"""
Set the value of the URL input for this Choreo. ((conditional, string) A URL to a hosted file that should be Base64 encoded. Required unless providing a value for the Text input.)
"""
super(Base64EncodeInputSet, self)._set_input('URL', value)
class Base64EncodeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Base64Encode Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Base64EncodedText(self):
"""
Retrieve the value for the "Base64EncodedText" output from this Choreo execution. ((string) The Base64 encoded text.)
"""
return self._output.get('Base64EncodedText', None)
class Base64EncodeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return Base64EncodeResultSet(response, path)
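# Hedged usage sketch (added for illustration; assumes a valid TembooSession
# and the SDK's standard execute_with_results() entry point):
def _example_base64_encode(session):
    choreo = Base64Encode(session)
    inputs = choreo.new_input_set()
    inputs.set_Text('hello world')
    results = choreo.execute_with_results(inputs)
    return results.get_Base64EncodedText()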
| jordanemedlock/psychtruths | temboo/core/Library/Utilities/Encoding/Base64Encode.py | Python | apache-2.0 | 3,302 |
"""Translate python-phonenumbers PhoneNumber to/from protobuf PhoneNumber
Examples of use:
>>> import phonenumbers
>>> from phonenumbers.pb2 import phonenumber_pb2, PBToPy, PyToPB
>>> x_py = phonenumbers.PhoneNumber(country_code=44, national_number=7912345678)
>>> print x_py
Country Code: 44 National Number: 7912345678
>>> y_pb = phonenumber_pb2.PhoneNumber()
>>> y_pb.country_code = 44
>>> y_pb.national_number = 7912345678
>>> print str(y_pb).strip()
country_code: 44
national_number: 7912345678
>>> # Check italian_leading_zero default value when not set
>>> y_pb.italian_leading_zero
False
>>> y_py = PBToPy(y_pb)
>>> print y_py
Country Code: 44 National Number: 7912345678
>>> x_pb = PyToPB(x_py)
>>> print str(x_pb).strip()
country_code: 44
national_number: 7912345678
>>> x_py == y_py
True
>>> x_pb == y_pb
True
>>> # Explicitly set the field to its default
>>> y_pb.italian_leading_zero = y_pb.italian_leading_zero
>>> x_pb == y_pb
False
"""
from phonenumber_pb2 import PhoneNumber as PhoneNumberPB
from phonenumbers import PhoneNumber
def PBToPy(numpb):
"""Convert phonenumber_pb2.PhoneNumber to phonenumber.PhoneNumber"""
return PhoneNumber(numpb.country_code if numpb.HasField("country_code") else None,
numpb.national_number if numpb.HasField("national_number") else None,
numpb.extension if numpb.HasField("extension") else None,
numpb.italian_leading_zero if numpb.HasField("italian_leading_zero") else None,
numpb.raw_input if numpb.HasField("raw_input") else None,
numpb.country_code_source if numpb.HasField("country_code_source") else None,
numpb.preferred_domestic_carrier_code if numpb.HasField("preferred_domestic_carrier_code") else None)
def PyToPB(numobj):
"""Convert phonenumber.PhoneNumber to phonenumber_pb2.PhoneNumber"""
numpb = PhoneNumberPB()
if numobj.country_code is not None:
numpb.country_code = numobj.country_code
if numobj.national_number is not None:
numpb.national_number = numobj.national_number
if numobj.extension is not None:
numpb.extension = numobj.extension
if numobj.italian_leading_zero is not None:
numpb.italian_leading_zero = numobj.italian_leading_zero
if numobj.raw_input is not None:
numpb.raw_input = numobj.raw_input
if numobj.country_code_source is not None:
numpb.country_code_source = numobj.country_code_source
if numobj.preferred_domestic_carrier_code is not None:
numpb.preferred_domestic_carrier_code = numobj.preferred_domestic_carrier_code
return numpb
__all__ = ['PBToPy', 'PyToPB']
if __name__ == '__main__': # pragma no cover
import doctest
doctest.testmod()
| dongguangming/python-phonenumbers | python/phonenumbers/pb2/__init__.py | Python | apache-2.0 | 2,788 |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from HTMLParser import HTMLParser
from htmlentitydefs import entitydefs
NON_BREAKING_SPACE = u'\xA0'
class HtmlReader(HTMLParser):
IGNORE = 0
INITIAL = 1
PROCESS = 2
def __init__(self):
HTMLParser.__init__(self)
self._encoding = 'ISO-8859-1'
self._handlers = {'table_start' : self.table_start,
'table_end' : self.table_end,
'tr_start' : self.tr_start,
'tr_end' : self.tr_end,
'td_start' : self.td_start,
'td_end' : self.td_end,
'th_start' : self.td_start,
'th_end' : self.td_end,
'br_start' : self.br_start,
'meta_start' : self.meta_start}
def read(self, htmlfile, populator):
self.populator = populator
self.state = self.IGNORE
self.current_row = None
self.current_cell = None
for line in htmlfile.readlines():
self.feed(self._decode(line))
        # Calling close() is required by HTMLParser but may cause problems
        # if the same HtmlReader instance is reused. Currently it's
        # used only once so there's no problem.
self.close()
self.populator.eof()
def _decode(self, line):
return line.decode(self._encoding)
def handle_starttag(self, tag, attrs):
handler = self._handlers.get(tag+'_start')
if handler is not None:
handler(attrs)
def handle_endtag(self, tag):
handler = self._handlers.get(tag+'_end')
if handler is not None:
handler()
def handle_data(self, data):
if self.state == self.IGNORE or self.current_cell is None:
return
if NON_BREAKING_SPACE in data:
data = data.replace(NON_BREAKING_SPACE, ' ')
self.current_cell.append(data)
def handle_entityref(self, name):
value = self._handle_entityref(name)
self.handle_data(value)
def _handle_entityref(self, name):
if name == 'apos': # missing from entitydefs
return "'"
try:
value = entitydefs[name]
except KeyError:
return '&'+name+';'
if value.startswith('&#'):
return unichr(int(value[2:-1]))
return value.decode('ISO-8859-1')
def handle_charref(self, number):
value = self._handle_charref(number)
self.handle_data(value)
def _handle_charref(self, number):
if number.startswith(('x', 'X')):
base = 16
number = number[1:]
else:
base = 10
try:
return unichr(int(number, base))
except ValueError:
return '&#'+number+';'
def unknown_decl(self, data):
# Ignore everything even if it's invalid. This kind of stuff comes
# at least from MS Excel
pass
def table_start(self, attrs=None):
self.state = self.INITIAL
self.current_row = None
self.current_cell = None
def table_end(self):
if self.current_row is not None:
self.tr_end()
self.state = self.IGNORE
def tr_start(self, attrs=None):
if self.current_row is not None:
self.tr_end()
self.current_row = []
def tr_end(self):
if self.current_row is None:
return
if self.current_cell is not None:
self.td_end()
if self.state == self.INITIAL:
accepted = self.populator.start_table(self.current_row)
self.state = self.PROCESS if accepted else self.IGNORE
elif self.state == self.PROCESS:
self.populator.add(self.current_row)
self.current_row = None
def td_start(self, attrs=None):
if self.current_cell is not None:
self.td_end()
if self.current_row is None:
self.tr_start()
self.current_cell = []
def td_end(self):
if self.current_cell is not None and self.state != self.IGNORE:
cell = ''.join(self.current_cell)
self.current_row.append(cell)
self.current_cell = None
def br_start(self, attrs=None):
self.handle_data('\n')
def meta_start(self, attrs):
encoding = self._get_encoding_from_meta(attrs)
if encoding:
self._encoding = encoding
def _get_encoding_from_meta(self, attrs):
valid_http_equiv = False
encoding = None
for name, value in attrs:
name = name.lower()
if name == 'charset': # html5
return value
if name == 'http-equiv' and value.lower() == 'content-type':
valid_http_equiv = True
if name == 'content':
encoding = self._get_encoding_from_content_attr(value)
return encoding if valid_http_equiv else None
def _get_encoding_from_content_attr(self, value):
for token in value.split(';'):
token = token.strip()
if token.lower().startswith('charset='):
return token[8:]
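    # Hedged sketch (added for illustration, not part of the original class):
    # a typical meta "content" attribute value and the charset it yields.
    def _example_get_encoding_from_content_attr(self):
        return self._get_encoding_from_content_attr('text/html; charset=UTF-8')  # -> 'UTF-8'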
def handle_pi(self, data):
encoding = self._get_encoding_from_pi(data)
if encoding:
self._encoding = encoding
def _get_encoding_from_pi(self, data):
data = data.strip()
if not data.lower().startswith('xml '):
return None
if data.endswith('?'):
data = data[:-1]
for token in data.split():
if token.lower().startswith('encoding='):
encoding = token[9:]
if encoding.startswith("'") or encoding.startswith('"'):
encoding = encoding[1:-1]
return encoding
return None
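# Hedged usage sketch (added for illustration; 'populator' stands for any
# object providing the start_table/add/eof interface used by read()):
def _example_read(path, populator):
    reader = HtmlReader()
    with open(path) as htmlfile:
        reader.read(htmlfile, populator)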
| eric-stanley/robotframework | src/robot/parsing/htmlreader.py | Python | apache-2.0 | 6,458 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import callbacks as cbks
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine.topology import Network
from tensorflow.python.keras._impl.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def _standardize_input_data(data,
names,
shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Arguments:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
Returns:
List of standardized input arrays (one array per model input).
Raises:
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
if data is not None and hasattr(data, '__len__') and len(data):
raise ValueError('Error when checking model ' + exception_prefix + ': '
'expected no data, but got:', data)
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
for key, value in data.items():
if value.__class__.__name__ == 'DataFrame':
data[key] = value.values
arrays = []
for name in names:
if name not in data:
raise ValueError('No data provided for "' + name +
'". Need data for each key in: ' + str(names))
arrays.append(data[name])
elif isinstance(data, list):
for key, value in enumerate(data):
if value.__class__.__name__ == 'DataFrame':
data[key] = value.values
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError(
'Error when checking model ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) + ' array(s), but instead got '
'the following list of ' + str(len(data)) + ' arrays: ' +
str(data)[:200] + '...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise ValueError('Error when checking model ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' + str(data)[:200])
arrays = data
elif data.__class__.__name__ == 'DataFrame':
# test if data is a DataFrame, without pandas installed
arrays = data.values
else:
if not hasattr(data, 'shape'):
raise TypeError('Error when checking model ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) > 1:
# Case: model expects multiple inputs but only received
# a single Numpy array.
raise ValueError('The model expects ' + str(len(names)) + ' ' +
exception_prefix +
' arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# Make arrays at least 2D.
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise ValueError(
'Error when checking ' + exception_prefix + ': expected ' + names[i]
+ ' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' + str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_axis:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] + ' to have shape ' +
str(shapes[i]) + ' but got array with shape ' +
str(array.shape))
return arrays
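# Hedged sketch (added for illustration, not part of the original module):
# a dict keyed by input name is normalized to an ordered list matching
# `names`, and 1D arrays are expanded to 2D.
def _example_standardize_input_data():
  data = {'a': np.array([1., 2.]), 'b': np.array([3., 4.])}
  out = _standardize_input_data(data, ['a', 'b'])
  assert [x.shape for x in out] == [(2, 1), (2, 1)]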
def _standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Arguments:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
A list of `sample_weight` or `class_weight` where there are exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0: # pylint: disable=g-explicit-length-test
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) + ' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
'should be either a list of a dict. '
'Provided `' + weight_type + '` type not understood: ' +
str(x_weight))
def _standardize_class_weights(class_weight, output_names):
return _standardize_sample_or_class_weights(class_weight, output_names,
'class_weight')
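# Hedged sketch (added for illustration, not part of the original module):
# a single class_weight dict is wrapped into a one-entry-per-output list.
def _example_standardize_class_weights():
  assert _standardize_class_weights({0: 1.0}, ['out']) == [{0: 1.0}]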
def _standardize_sample_weights(sample_weight, output_names):
return _standardize_sample_or_class_weights(sample_weight, output_names,
'sample_weight')
def _check_array_lengths(inputs, targets, weights=None):
"""Does user input validation for numpy arrays.
Arguments:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
"""
def set_of_lengths(x):
# return a set with the variation between
# different shapes, with None => 0
if x is None:
return {0}
else:
return set([0 if y is None else y.shape[0] for y in x])
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' + str(
[x.shape for x in inputs]))
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' + str(
[y.shape for y in targets]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' + str(
[w.shape for w in weights]))
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
str(list(set_y)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
Arguments:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_losses = {
losses.mean_squared_error, losses.binary_crossentropy,
losses.categorical_crossentropy
}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if loss is None:
continue
if loss is losses.categorical_crossentropy:
if y.shape[-1] == 1:
raise ValueError('You are passing a target array of shape ' + str(
y.shape) + ' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def _collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
Arguments:
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
Returns:
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' + str(metrics))
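# Hedged sketch (added for illustration, not part of the original module):
# a plain list is copied to every output; a dict maps outputs by name.
def _example_collect_metrics():
  assert _collect_metrics(['acc'], ['out_a', 'out_b']) == [['acc'], ['acc']]
  assert _collect_metrics({'out_a': 'acc'}, ['out_a', 'out_b']) == [['acc'], []]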
def _batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Arguments:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
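# Hedged sketch (added for illustration, not part of the original module):
# with 7 indices and batch_size=3, the two complete batches are shuffled as
# units and the leftover index is re-appended at the end.
def _example_batch_shuffle():
  idx = _batch_shuffle(np.arange(7), 3)
  assert len(idx) == 7 and idx[-1] == 6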
def _make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(num_batches)]
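# Hedged sketch (added for illustration, not part of the original module):
# the final batch is truncated to fit the data size.
def _example_make_batches():
  assert _make_batches(10, 4) == [(0, 4), (4, 8), (8, 10)]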
def _slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
"""
if arrays is None:
return [None]
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
else:
return [None if x is None else x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
elif hasattr(start, '__getitem__'):
return arrays[start:stop]
else:
return [None]
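# Hedged sketch (added for illustration, not part of the original module):
# slicing works the same for a single array and for a list of arrays, and
# index lists trigger fancy indexing.
def _example_slice_arrays():
  a, b = np.arange(5), np.arange(5) * 2
  assert _slice_arrays([a, b], 1, 3)[0].tolist() == [1, 2]
  assert _slice_arrays(a, [0, 4]).tolist() == [0, 4]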
def _weighted_masked_objective(fn):
"""Adds support for masking and sample-weighting to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
Arguments:
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
Returns:
A function with signature `fn(y_true, y_pred, weights, mask)`.
"""
if fn is None:
return None
def weighted(y_true, y_pred, weights, mask=None):
"""Wrapper function.
Arguments:
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
weights: Weights tensor.
mask: Mask tensor.
Returns:
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# apply sample weighting
if weights is not None:
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
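# Hedged sketch (added for illustration, not part of the original module):
# wrapping mean_squared_error and zero-weighting the first sample leaves
# only the second sample's squared error, rescaled by the nonzero fraction.
def _example_weighted_masked_objective():
  weighted_mse = _weighted_masked_objective(losses.mean_squared_error)
  y_true = K.constant([[1.], [2.]])
  y_pred = K.constant([[1.], [4.]])
  weights = K.constant([0., 1.])
  return K.eval(weighted_mse(y_true, y_pred, weights))  # -> 4.0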
def _standardize_weights(y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
Arguments:
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' + str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
raise ValueError('Found a sample_weight with shape' +
str(sample_weight.shape) + '.'
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + ' for an input with shape ' +
str(y.shape) + '. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('`class_weight` not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray(
[class_weight[cls] for cls in y_classes if cls in class_weight])
if len(weights) != len(y_classes):
# subtract the sets to pick all missing classes
existing_classes = set(y_classes)
existing_class_weight = set(class_weight.keys())
raise ValueError('`class_weight` must contain all classes in the data.'
' The classes %s exist in the data but not in '
'`class_weight`.' %
(existing_classes - existing_class_weight))
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
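# Hedged sketch (added for illustration, not part of the original module):
# integer class targets are mapped through class_weight, one weight per
# sample.
def _example_standardize_weights():
  y = np.array([[0], [1], [1]])
  w = _standardize_weights(y, class_weight={0: 1.0, 1: 2.0})
  assert w.tolist() == [1.0, 2.0, 2.0]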
class Model(Network):
"""The `Model` class adds training & evaluation routines to a `Network`.
"""
def compile(self,
optimizer,
loss,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
        to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
**kwargs: These arguments are passed to `tf.Session.run`.
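    Example:
    ```python
    # Illustrative sketch; assumes a built two-output model whose
    # outputs are named 'main' and 'aux' (hypothetical names).
    model.compile(optimizer='rmsprop',
                  loss={'main': 'categorical_crossentropy',
                        'aux': 'binary_crossentropy'},
                  loss_weights={'main': 1., 'aux': 0.2},
                  metrics=['accuracy'])
    ```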
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
loss = loss or {}
    self.optimizer = optimizers.get(optimizer)
    self.loss = loss
    self.loss_weights = loss_weights
    self.sample_weight_mode = sample_weight_mode
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name + '" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name + '" during training.')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list or a dict.')
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors is not None:
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError('When passing a list as `target_tensors`, '
                           'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' +
str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError('Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
target_tensors_ = []
for name in self.output_names:
target_tensors_.append(target_tensors.get(name, None))
target_tensors = target_tensors_
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = self.internal_output_shapes[i]
name = self.output_names[i]
if target_tensors is not None:
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' + str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(
K.placeholder(ndim=2, name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(
K.placeholder(ndim=1, name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare metrics.
self.metrics = metrics
self.weighted_metrics = weighted_metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
    # List of the same size as output_names.
    # Contains tuples (metrics for output, names of metrics).
nested_metrics = _collect_metrics(metrics, self.output_names)
nested_weighted_metrics = _collect_metrics(weighted_metrics,
self.output_names)
def append_metric(layer_index, metric_name, metric_tensor):
"""Helper function used in loop below."""
if len(self.output_names) > 1:
metric_name = self.output_names[layer_index] + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weights = sample_weights[i]
output_metrics = nested_metrics[i]
output_weighted_metrics = nested_weighted_metrics[i]
def handle_metrics(metrics, weights=None):
metric_name_prefix = 'weighted_' if weights is not None else ''
for metric in metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy
# (because of class mode duality)
output_shape = self.internal_output_shapes[i]
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
          elif (self.loss_functions[i] ==
                losses.sparse_categorical_crossentropy):
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
weighted_metric_fn = _weighted_masked_objective(acc_fn)
metric_name = metric_name_prefix + 'acc'
else:
metric_fn = metrics_module.get(metric)
weighted_metric_fn = _weighted_masked_objective(metric_fn)
metric_name = metric_name_prefix + metric_fn.__name__
with K.name_scope(metric_name):
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=masks[i])
append_metric(i, metric_name, metric_result)
handle_metrics(output_metrics)
handle_metrics(output_weighted_metrics, weights=weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
    `_collected_trainable_weights` are inconsistent (i.e. do not have
    the same number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.warning(
'Discrepancy between trainable weights and collected trainable'
' weights, did you set `model.trainable` without calling'
' `model.compile` after ?')
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
training_updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _check_num_samples(self,
ins,
batch_size=None,
steps=None,
steps_name='steps'):
"""Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Arguments:
ins: List of tensors to be fed to the Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
Raises:
ValueError: when `steps` is `None` and the attribute `ins.shape`
does not exist. Also raises ValueError when `steps` is not `None`
and `batch_size` is not `None` because they are mutually
exclusive.
Returns:
When steps is `None`, returns the number of samples to be
processed based on the size of the first dimension of the
first input numpy array. When steps is not `None` and
`batch_size` is `None`, returns `None`.
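    Example:
    ```python
    # Illustrative sketch (`np` is numpy, `model` a compiled Model):
    model._check_num_samples([np.zeros((100, 3))])            # -> 100
    model._check_num_samples([np.zeros((100, 3))], steps=5)   # -> None
    ```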
"""
if steps is not None:
num_samples = None
if batch_size is not None:
raise ValueError('If ' + steps_name +
' is set, the `batch_size` must be None.')
elif ins and hasattr(ins[0], 'shape'):
num_samples = ins[0].shape[0]
else:
raise ValueError('Either the input data should have '
'a defined shape, or ' + steps_name +
' should be specified.')
return num_samples
def _fit_loop(self,
f,
ins,
out_labels=None,
batch_size=None,
epochs=100,
verbose=1,
callbacks=None,
val_f=None,
val_ins=None,
shuffle=True,
callback_metrics=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Abstract fit function for `f(ins)`.
    Assumes that `f` returns a list, labeled by `out_labels`.
Arguments:
f: Keras function returning a list of tensors
ins: List of tensors to be fed to `f`
out_labels: List of strings, display names of
the outputs of `f`
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: List of tensors to be fed to `val_f`
shuffle: Whether to shuffle the data at the beginning of each epoch
callback_metrics: List of strings, the display names of the metrics
passed to the callbacks. They should be the
          concatenation of the list of display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
Returns:
`History` object.
Raises:
ValueError: In case of invalid argument values.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if (verbose and ins and
          hasattr(ins[0], 'shape') and val_ins and
          hasattr(val_ins[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
if validation_steps:
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` when doing step-wise '
'training, i.e. `steps_per_epoch` must be set.')
do_validation = True
num_train_samples = self._check_num_samples(
ins, batch_size, steps_per_epoch, 'steps_per_epoch')
if num_train_samples is not None:
index_array = np.arange(num_train_samples)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
if steps_per_epoch is not None:
count_mode = 'steps'
else:
count_mode = 'samples'
callbacks += [cbks.ProgbarLogger(count_mode)]
callbacks = cbks.CallbackList(callbacks)
out_labels = out_labels or []
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_ins
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
if steps_per_epoch is not None:
for step_index in range(steps_per_epoch):
batch_logs = {}
batch_logs['batch'] = step_index
batch_logs['size'] = 1
callbacks.on_batch_begin(step_index, batch_logs)
outs = f(ins)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callback_model.stop_training:
break
if do_validation:
val_outs = self._test_loop(
val_f,
val_ins,
batch_size=batch_size,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
else:
if shuffle == 'batch':
index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = _make_batches(num_train_samples, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if callback_model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
val_outs = self._test_loop(
val_f, val_ins, batch_size=batch_size, verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
if steps is not None:
# Step-based predictions.
# Since we do not know how many samples
# we will see, we cannot pre-allocate
# the returned Numpy arrays.
# Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_outs = f(ins)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step == 0:
for batch_out in batch_outs:
unconcatenated_outs.append([])
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
if verbose == 1:
progbar.update(step + 1)
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
else:
# Sample-based predictions.
outs = []
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
# Pre-allocate the results arrays.
for batch_out in batch_outs:
shape = (num_samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=None, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
outs = []
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
if steps is not None:
for step in range(steps):
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
            for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
outs[i] /= steps
else:
if verbose == 1:
progbar = Progbar(target=num_samples)
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
            for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= num_samples
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self,
x,
y,
sample_weight=None,
class_weight=None,
check_batch_axis=True,
batch_size=None):
if not hasattr(self, 'optimizer'):
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
output_shapes.append(output_shape[:-1] + (1,))
else:
output_shapes.append(output_shape)
x = _standardize_input_data(
x,
self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False,
exception_prefix='input')
y = _standardize_input_data(
y,
self._feed_output_names,
output_shapes,
check_batch_axis=False,
exception_prefix='target')
sample_weights = _standardize_sample_weights(sample_weight,
self._feed_output_names)
class_weights = _standardize_class_weights(class_weight,
self._feed_output_names)
sample_weights = [
_standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
self._feed_sample_weight_modes)
]
_check_array_lengths(x, y, sample_weights)
_check_loss_and_target_compatibility(y, self._feed_loss_fns,
self._feed_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _get_deduped_metrics_names(self):
out_labels = self.metrics_names
# Rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows).
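    # For example, ['loss', 'acc', 'acc'] becomes ['loss', 'acc_1', 'acc_2'].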
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
return deduped_out_labels
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Numpy array of training data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
TensorFlow data tensors.
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
          `y` can be `None` (default) if feeding from
          framework-native tensors (e.g. TensorFlow data tensors).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, it will default to 32.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling.
validation_data: tuple `(x_val, y_val)` or tuple
`(x_val, y_val, val_sample_weights)` on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
This will override `validation_split`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of unique samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
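    Example:
    ```python
    # Illustrative sketch; `x_train` and `y_train` are hypothetical
    # Numpy arrays with matching first dimensions.
    history = model.fit(x_train, y_train,
                        batch_size=64,
                        epochs=5,
                        validation_split=0.1)
    print(history.history['loss'])
    ```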
Raises:
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
if x is None and y is None and steps_per_epoch is None:
raise ValueError('If fitting from data tensors, '
'you should specify the `steps_per_epoch` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare validation data.
do_validation = False
val_ins = []
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' % len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
check_batch_axis=False,
batch_size=batch_size)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (_slice_arrays(x, 0, split_at), _slice_arrays(x, split_at))
y, val_y = (_slice_arrays(y, 0, split_at), _slice_arrays(y, split_at))
sample_weights, val_sample_weights = (_slice_arrays(
sample_weights, 0, split_at), _slice_arrays(sample_weights, split_at))
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_steps:
do_validation = True
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = [0.]
# Prepare input arrays and training function.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
if do_validation:
self._make_test_function()
val_f = self.test_function
callback_metrics = copy.copy(out_labels) + [
'val_' + n for n in out_labels
]
else:
val_f = None
callback_metrics = copy.copy(out_labels)
# Delegate logic to `_fit_loop`.
return self._fit_loop(
f,
ins,
out_labels=out_labels,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_f=val_f,
val_ins=val_ins,
shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Numpy array of test data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
batch_size: Integer or `None`.
Number of samples per evaluation step.
If unspecified, `batch_size` will default to 32.
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
The default `None` is equal to the number of unique samples in
your dataset divided by the batch size.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
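    Example:
    ```python
    # Illustrative sketch; `x_test`/`y_test` are hypothetical arrays
    # and the model is assumed to report at least one metric.
    scores = model.evaluate(x_test, y_test, batch_size=128)
    print(dict(zip(model.metrics_names, scores)))
    ```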
Raises:
ValueError: In case of invalid arguments.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and y is None and steps is None:
raise ValueError('If evaluating from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare inputs, delegate logic to `_test_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(
f, ins, batch_size=batch_size, verbose=verbose, steps=steps)
def predict(self, x, batch_size=None, verbose=0, steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: The input data, as a Numpy array
          (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
Returns:
Numpy array(s) of predictions.
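    Example:
    ```python
    # Illustrative sketch; `x_test` is a hypothetical input array and
    # a softmax output is assumed for the argmax step.
    probs = model.predict(x_test, batch_size=128)
    classes = probs.argmax(axis=-1)
    ```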
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and steps is None:
raise ValueError('If predicting from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x = _standardize_input_data(
x,
self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# Prepare inputs, delegate logic to `_predict_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(
f, ins, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
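    Example:
    ```python
    # Illustrative sketch; runs one gradient update on hypothetical
    # batch arrays `x_batch` and `y_batch`.
    loss = model.train_on_batch(x_batch, y_batch)
    ```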
"""
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
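    Example:
    ```python
    # Illustrative sketch; evaluates hypothetical batch arrays
    # `x_batch`/`y_batch` without updating any weights.
    loss = model.test_on_batch(x_batch, y_batch)
    ```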
"""
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input samples, as a Numpy array.
Returns:
Numpy array(s) of predictions.
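    Example:
    ```python
    # Illustrative sketch; `x_batch` is a hypothetical input batch.
    preds = model.predict_on_batch(x_batch)
    ```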
"""
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0,
**kwargs):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
    The use of `keras.utils.Sequence` guarantees the ordering
    and that every input is used exactly once per epoch when
    using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data when using multiprocessing.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of unique samples of your dataset
divided by the batch size. Not used if using `Sequence`.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Maximum size for the generator queue
workers: Maximum number of processes to spin up
when using process-based threading.
use_multiprocessing: If True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
shuffle: Whether to shuffle the data at the beginning of each
epoch. Only used with instances of `Sequence`
(`keras.utils.Sequence`).
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
**kwargs: support for legacy arguments.
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__') or
isinstance(validation_data, Sequence))
if val_gen and not validation_steps:
raise ValueError('When using a generator for validation data, '
'you must specify a value for '
'`validation_steps`.')
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError('`validation_data` should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
      logging.warning('Using a generator with `use_multiprocessing=True`'
                      ' and multiple workers may duplicate your data.'
                      ' Please consider using the `keras.utils.Sequence`'
                      ' class.')
if is_sequence:
steps_per_epoch = len(generator)
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing, shuffle=shuffle)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
callback_model.stop_training = False
while epoch < epochs:
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# Construct epoch logs.
epoch_logs = {}
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
validation_steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
else:
# No need for try/except because
# data has already been validated.
val_outs = self.evaluate(
val_x,
val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
if callback_model.stop_training:
break
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
def evaluate_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Not used if using `Sequence`.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process-based threading.
use_multiprocessing: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
**kwargs: support for legacy arguments.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
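    Example:
    ```python
    # Illustrative sketch; `val_gen` is a hypothetical generator
    # yielding `(x, y)` batches indefinitely.
    scores = model.evaluate_generator(val_gen, steps=50)
    ```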
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
self._make_test_function()
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
      logging.warning('Using a generator with `use_multiprocessing=True`'
                      ' and multiple workers may duplicate your data.'
                      ' Please consider using the `keras.utils.Sequence`'
                      ' class.')
if is_sequence:
steps = len(generator)
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
while steps_done < steps:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
batch_size = len(x[0])
elif isinstance(x, dict):
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
if batch_size == 0:
raise ValueError('Received an empty batch. '
'Batches should at least contain one item.')
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
finally:
if enqueuer is not None:
enqueuer.stop()
if not isinstance(outs, list):
return np.average(np.asarray(all_outs), weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
averages.append(
np.average([out[i] for out in all_outs], weights=batch_sizes))
return averages
def predict_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0,
**kwargs):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
      steps: Total number of steps (batches of samples)
          to yield from `generator` before stopping.
          Not used if using `Sequence`.
      max_queue_size: Maximum size for the generator queue.
workers: Maximum number of processes to spin up
when using process-based threading.
use_multiprocessing: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
**kwargs: support for legacy arguments.
Returns:
Numpy array(s) of predictions.
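    Example:
    ```python
    # Illustrative sketch; `test_gen` is a hypothetical generator
    # yielding input batches only.
    preds = model.predict_generator(test_gen, steps=100, verbose=1)
    ```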
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
self._make_predict_function()
steps_done = 0
wait_time = 0.01
all_outs = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
      logging.warning('Using a generator with `use_multiprocessing=True`'
                      ' and multiple workers may duplicate your data.'
                      ' Please consider using the `keras.utils.Sequence`'
                      ' class.')
if is_sequence:
steps = len(generator)
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(output_generator)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output
elif len(generator_output) == 3:
x, _, _ = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = self.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
| Kongsea/tensorflow | tensorflow/python/keras/_impl/keras/engine/training.py | Python | apache-2.0 | 98,431 |
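A minimal usage sketch for the generator methods above, assuming a compiled Keras `model` (the model itself and its input width of 10 features are placeholders, not part of this file):

```python
import numpy as np

def input_batches(x, batch_size=32):
    """Yield input-only batches forever, as predict_generator expects."""
    i = 0
    while True:
        yield x[i:i + batch_size]
        i = (i + batch_size) % len(x)

x = np.random.rand(128, 10).astype('float32')
# `model` is assumed to be a compiled Keras model taking 10 features:
# preds = model.predict_generator(input_batches(x), steps=len(x) // 32,
#                                 max_queue_size=10, workers=1, verbose=1)
```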
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"
FIREWALL = "FIREWALL"
VPN = "VPN"
METERING = "METERING"
L3_ROUTER_NAT = "L3_ROUTER_NAT"
# maps extension alias to service type
EXT_TO_SERVICE_MAPPING = {
'dummy': DUMMY,
'lbaas': LOADBALANCER,
'fwaas': FIREWALL,
'vpnaas': VPN,
'metering': METERING,
'router': L3_ROUTER_NAT
}
# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING,
L3_ROUTER_NAT]
COMMON_PREFIXES = {
CORE: "",
DUMMY: "/dummy_svc",
LOADBALANCER: "/lb",
FIREWALL: "/fw",
VPN: "/vpn",
METERING: "/metering",
L3_ROUTER_NAT: "",
}
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
# FWaaS firewall rule action
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"
# L3 Protocol name constants
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
| citrix-openstack-build/neutron | neutron/plugins/common/constants.py | Python | apache-2.0 | 1,771 |
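A small illustration of how these constants compose; the `resolve_service` helper is ours, not part of Neutron:

```python
def resolve_service(alias):
    """Map an extension alias to its service type and URL prefix."""
    service_type = EXT_TO_SERVICE_MAPPING[alias]
    return service_type, COMMON_PREFIXES[service_type]

assert resolve_service('lbaas') == (LOADBALANCER, '/lb')
assert resolve_service('vpnaas') == (VPN, '/vpn')
```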
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for the Timelines flow."""
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
# pylint: disable=unused-import
from grr.lib.flows.general import timelines as _
# pylint: enable=unused-import
from grr.lib.rdfvalues import paths as rdf_paths
class TestTimelines(test_lib.FlowTestsBaseclass):
"""Test the timelines flow."""
client_id = "C.0000000000000005"
def testMACTimes(self):
"""Test that the timelining works with files."""
with test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, test_lib.ClientVFSHandlerFixture):
client_mock = action_mocks.ActionMock("ListDirectory")
output_path = "analysis/Timeline/MAC"
pathspec = rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.OS)
for _ in test_lib.TestFlowHelper(
"RecursiveListDirectory", client_mock, client_id=self.client_id,
pathspec=pathspec, token=self.token):
pass
# Now make a timeline
for _ in test_lib.TestFlowHelper(
"MACTimes", client_mock, client_id=self.client_id, token=self.token,
path="/", output=output_path):
pass
fd = aff4.FACTORY.Open(self.client_id.Add(output_path), token=self.token)
timestamp = 0
events = list(fd.Query("event.stat.pathspec.path contains grep"))
for event in events:
# Check the times are monotonously increasing.
self.assert_(event.event.timestamp >= timestamp)
timestamp = event.event.timestamp
self.assert_("grep" in event.event.stat.pathspec.path)
# 9 files, each having mac times = 27 events.
self.assertEqual(len(events), 27)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| statik/grr | lib/flows/general/timelines_test.py | Python | apache-2.0 | 1,939 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import logging
import os
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import saver as saver_lib
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
slim = tf.contrib.slim
# TODO: Replace with freeze_graph.freeze_graph_with_def_protos when
# newer version of Tensorflow becomes more common.
def freeze_graph_with_def_protos(
input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
clear_devices,
initializer_nodes,
variable_names_blacklist=''):
"""Converts all variables in a graph and checkpoint into constants."""
del restore_op_name, filename_tensor_name # Unused by updated loading code.
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if not saver_lib.checkpoint_exists(input_checkpoint):
raise ValueError(
'Input checkpoint "' + input_checkpoint + '" does not exist!')
if not output_node_names:
raise ValueError(
'You must supply the name of a node to --output_node_names.')
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ''
_ = importer.import_graph_def(input_graph_def, name='')
with session.Session() as sess:
if input_saver_def:
saver = saver_lib.Saver(saver_def=input_saver_def)
saver.restore(sess, input_checkpoint)
else:
var_list = {}
reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
try:
tensor = sess.graph.get_tensor_by_name(key + ':0')
except KeyError:
# This tensor doesn't exist in the graph (for example it's
# 'global_step' or a similar housekeeping element) so skip it.
continue
var_list[key] = tensor
saver = saver_lib.Saver(var_list=var_list)
saver.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes)
variable_names_blacklist = (variable_names_blacklist.split(',') if
variable_names_blacklist else None)
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.split(','),
variable_names_blacklist=variable_names_blacklist)
return output_graph_def
def get_frozen_graph_def(inference_graph_def, use_moving_averages,
input_checkpoint, output_node_names):
"""Freezes all variables in a graph definition."""
saver = None
if use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
else:
saver = tf.train.Saver()
frozen_graph_def = freeze_graph_with_def_protos(
input_graph_def=inference_graph_def,
input_saver_def=saver.as_saver_def(),
input_checkpoint=input_checkpoint,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
initializer_nodes='')
return frozen_graph_def
# TODO: Support batch tf example inputs.
def _tf_example_input_placeholder():
tf_example_placeholder = tf.placeholder(
tf.string, shape=[], name='tf_example')
tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
tf_example_placeholder)
image = tensor_dict[fields.InputDataFields.image]
return tf.expand_dims(image, axis=0)
def _image_tensor_input_placeholder():
return tf.placeholder(dtype=tf.uint8,
shape=(1, None, None, 3),
name='image_tensor')
def _encoded_image_string_tensor_input_placeholder():
image_str = tf.placeholder(dtype=tf.string,
shape=[],
name='encoded_image_string_tensor')
image_tensor = tf.image.decode_image(image_str, channels=3)
image_tensor.set_shape((None, None, 3))
return tf.expand_dims(image_tensor, axis=0)
input_placeholder_fn_map = {
'image_tensor': _image_tensor_input_placeholder,
'encoded_image_string_tensor':
_encoded_image_string_tensor_input_placeholder,
'tf_example': _tf_example_input_placeholder,
}
def _add_output_tensor_nodes(postprocessed_tensors):
"""Adds output nodes for detection boxes and scores.
Adds the following nodes for output tensors -
* num_detections: float32 tensor of shape [batch_size].
* detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
containing detected boxes.
* detection_scores: float32 tensor of shape [batch_size, num_boxes]
containing scores for the detected boxes.
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
containing class predictions for the detected boxes.
* detection_masks: (Optional) float32 tensor of shape
[batch_size, num_boxes, mask_height, mask_width] containing masks for each
detection box.
Args:
postprocessed_tensors: a dictionary containing the following fields
'detection_boxes': [batch, max_detections, 4]
'detection_scores': [batch, max_detections]
'detection_classes': [batch, max_detections]
'detection_masks': [batch, max_detections, mask_height, mask_width]
(optional).
'num_detections': [batch]
Returns:
A tensor dict containing the added output tensor nodes.
"""
label_id_offset = 1
boxes = postprocessed_tensors.get('detection_boxes')
scores = postprocessed_tensors.get('detection_scores')
classes = postprocessed_tensors.get('detection_classes') + label_id_offset
masks = postprocessed_tensors.get('detection_masks')
num_detections = postprocessed_tensors.get('num_detections')
outputs = {}
outputs['detection_boxes'] = tf.identity(boxes, name='detection_boxes')
outputs['detection_scores'] = tf.identity(scores, name='detection_scores')
outputs['detection_classes'] = tf.identity(classes, name='detection_classes')
outputs['num_detections'] = tf.identity(num_detections, name='num_detections')
if masks is not None:
outputs['detection_masks'] = tf.identity(masks, name='detection_masks')
return outputs
def _write_inference_graph(inference_graph_path,
checkpoint_path=None,
use_moving_averages=False,
output_node_names=(
'num_detections,detection_scores,'
'detection_boxes,detection_classes')):
"""Writes inference graph to disk with the option to bake in weights.
If checkpoint_path is not None bakes the weights into the graph thereby
eliminating the need of checkpoint files during inference. If the model
was trained with moving averages, setting use_moving_averages to true
restores the moving averages, otherwise the original set of variables
is restored.
Args:
inference_graph_path: Path to write inference graph.
checkpoint_path: Optional path to the checkpoint file.
use_moving_averages: Whether to export the original or the moving averages
of the trainable variables from the checkpoint.
output_node_names: Output tensor names, defaults are: num_detections,
detection_scores, detection_boxes, detection_classes.
"""
inference_graph_def = tf.get_default_graph().as_graph_def()
if checkpoint_path:
output_graph_def = get_frozen_graph_def(
inference_graph_def=inference_graph_def,
use_moving_averages=use_moving_averages,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
)
with gfile.GFile(inference_graph_path, 'wb') as f:
f.write(output_graph_def.SerializeToString())
logging.info('%d ops in the final graph.', len(output_graph_def.node))
return
tf.train.write_graph(inference_graph_def,
os.path.dirname(inference_graph_path),
os.path.basename(inference_graph_path),
as_text=False)
def _write_saved_model(inference_graph_path, inputs, outputs,
checkpoint_path=None, use_moving_averages=False):
"""Writes SavedModel to disk.
If checkpoint_path is not None bakes the weights into the graph thereby
eliminating the need of checkpoint files during inference. If the model
was trained with moving averages, setting use_moving_averages to true
restores the moving averages, otherwise the original set of variables
is restored.
Args:
inference_graph_path: Path to write inference graph.
inputs: The input image tensor to use for detection.
outputs: A tensor dictionary containing the outputs of a DetectionModel.
checkpoint_path: Optional path to the checkpoint file.
use_moving_averages: Whether to export the original or the moving averages
of the trainable variables from the checkpoint.
"""
inference_graph_def = tf.get_default_graph().as_graph_def()
checkpoint_graph_def = None
if checkpoint_path:
output_node_names = ','.join(outputs.keys())
checkpoint_graph_def = get_frozen_graph_def(
inference_graph_def=inference_graph_def,
use_moving_averages=use_moving_averages,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names
)
with tf.Graph().as_default():
with session.Session() as sess:
tf.import_graph_def(checkpoint_graph_def)
builder = tf.saved_model.builder.SavedModelBuilder(inference_graph_path)
tensor_info_inputs = {
'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
tensor_info_outputs = {}
for k, v in outputs.items():
tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)
detection_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name=signature_constants.PREDICT_METHOD_NAME))
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
detection_signature,
},
)
builder.save()
def _export_inference_graph(input_type,
detection_model,
use_moving_averages,
checkpoint_path,
inference_graph_path,
export_as_saved_model=False):
"""Export helper."""
if input_type not in input_placeholder_fn_map:
raise ValueError('Unknown input type: {}'.format(input_type))
inputs = tf.to_float(input_placeholder_fn_map[input_type]())
preprocessed_inputs = detection_model.preprocess(inputs)
output_tensors = detection_model.predict(preprocessed_inputs)
postprocessed_tensors = detection_model.postprocess(output_tensors)
outputs = _add_output_tensor_nodes(postprocessed_tensors)
out_node_names = list(outputs.keys())
if export_as_saved_model:
_write_saved_model(inference_graph_path, inputs, outputs, checkpoint_path,
use_moving_averages)
else:
_write_inference_graph(inference_graph_path, checkpoint_path,
use_moving_averages,
output_node_names=','.join(out_node_names))
def export_inference_graph(input_type, pipeline_config, checkpoint_path,
inference_graph_path, export_as_saved_model=False):
"""Exports inference graph for the model specified in the pipeline config.
Args:
input_type: Type of input for the graph. Can be one of [`image_tensor`,
`tf_example`].
pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
checkpoint_path: Path to the checkpoint file to freeze.
inference_graph_path: Path to write inference graph to.
export_as_saved_model: If the model should be exported as a SavedModel. If
false, it is saved as an inference graph.
"""
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
_export_inference_graph(input_type, detection_model,
pipeline_config.eval_config.use_moving_averages,
checkpoint_path, inference_graph_path,
export_as_saved_model)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/exporter.py | Python | bsd-2-clause | 13,779 |
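A hedged usage sketch for `export_inference_graph`; the config and checkpoint paths are placeholders, and loading the pipeline proto follows the usual object_detection pattern:

```python
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2
from object_detection import exporter

pipeline_config = pipeline_pb2.TrainAndEvalPipelineConfig()
with tf.gfile.GFile('pipeline.config', 'r') as f:  # placeholder path
    text_format.Merge(f.read(), pipeline_config)
exporter.export_inference_graph(
    input_type='image_tensor',
    pipeline_config=pipeline_config,
    checkpoint_path='model.ckpt-200000',            # placeholder checkpoint
    inference_graph_path='frozen_inference_graph.pb')
```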
from sympy.core.basic import Basic
from sympy import (sympify, eye, sin, cos, rot_axis1, rot_axis2,
rot_axis3, ImmutableMatrix as Matrix, Symbol)
from sympy.core.cache import cacheit
import sympy.vector
class Orienter(Basic):
"""
Super-class for all orienter classes.
"""
def rotation_matrix(self):
"""
The rotation matrix corresponding to this orienter
instance.
"""
return self._parent_orient
class AxisOrienter(Orienter):
"""
Class to denote an axis orienter.
"""
def __new__(cls, angle, axis):
if not isinstance(axis, sympy.vector.Vector):
raise TypeError("axis should be a Vector")
angle = sympify(angle)
obj = super(AxisOrienter, cls).__new__(cls, angle,
axis)
obj._angle = angle
obj._axis = axis
return obj
def __init__(self, angle, axis):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSys3D('N')
>>> from sympy.vector import AxisOrienter
>>> orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> B = N.orient_new('B', (orienter, ))
"""
# Dummy initializer for docstrings
pass
@cacheit
def rotation_matrix(self, system):
"""
The rotation matrix corresponding to this orienter
instance.
Parameters
==========
system : CoordSys3D
The coordinate system wrt which the rotation matrix
is to be computed
"""
axis = sympy.vector.express(self.axis, system).normalize()
axis = axis.to_matrix(system)
theta = self.angle
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) +
axis * axis.T)
parent_orient = parent_orient.T
return parent_orient
@property
def angle(self):
return self._angle
@property
def axis(self):
return self._axis
class ThreeAngleOrienter(Orienter):
"""
Super-class for Body and Space orienters.
"""
def __new__(cls, angle1, angle2, angle3, rot_order):
approved_orders = ('123', '231', '312', '132', '213',
'321', '121', '131', '212', '232',
'313', '323', '')
original_rot_order = rot_order
rot_order = str(rot_order).upper()
if not (len(rot_order) == 3):
raise TypeError('rot_order should be a str of length 3')
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if rot_order not in approved_orders:
            raise TypeError('Invalid rot_order parameter')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
angle1 = sympify(angle1)
angle2 = sympify(angle2)
angle3 = sympify(angle3)
if cls._in_order:
parent_orient = (_rot(a1, angle1) *
_rot(a2, angle2) *
_rot(a3, angle3))
else:
parent_orient = (_rot(a3, angle3) *
_rot(a2, angle2) *
_rot(a1, angle1))
parent_orient = parent_orient.T
obj = super(ThreeAngleOrienter, cls).__new__(
cls, angle1, angle2, angle3, Symbol(original_rot_order))
obj._angle1 = angle1
obj._angle2 = angle2
obj._angle3 = angle3
obj._rot_order = original_rot_order
obj._parent_orient = parent_orient
return obj
@property
def angle1(self):
return self._angle1
@property
def angle2(self):
return self._angle2
@property
def angle3(self):
return self._angle3
@property
def rot_order(self):
return self._rot_order
class BodyOrienter(ThreeAngleOrienter):
"""
Class to denote a body-orienter.
"""
_in_order = True
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see https://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
Examples
========
>>> from sympy.vector import CoordSys3D, BodyOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
D.k. (Initially, D.i is same as N.i)
Therefore,
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> D = N.orient_new('D', (body_orienter, ))
is same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> D = N.orient_new('D', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, D.j)
>>> D = D.orient_new('D', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, D.k)
>>> D = D.orient_new('D', (axis_orienter3, ))
Acceptable rotation orders are of length 3, expressed in XYZ or
        123, and cannot have a rotation about an axis twice in a row.
>>> body_orienter1 = BodyOrienter(q1, q2, q3, '123')
>>> body_orienter2 = BodyOrienter(q1, q2, 0, 'ZXZ')
>>> body_orienter3 = BodyOrienter(0, 0, 0, 'XYX')
"""
# Dummy initializer for docstrings
pass
class SpaceOrienter(ThreeAngleOrienter):
"""
Class to denote a space-orienter.
"""
_in_order = False
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
See Also
========
BodyOrienter : Orienter to orient systems wrt Euler angles.
Examples
========
>>> from sympy.vector import CoordSys3D, SpaceOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSys3D('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> D = N.orient_new('D', (space_orienter, ))
is same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> B = N.orient_new('B', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, N.j)
>>> C = B.orient_new('C', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, N.k)
>>> D = C.orient_new('C', (axis_orienter3, ))
"""
# Dummy initializer for docstrings
pass
class QuaternionOrienter(Orienter):
"""
Class to denote a quaternion-orienter.
"""
def __new__(cls, q0, q1, q2, q3):
q0 = sympify(q0)
q1 = sympify(q1)
q2 = sympify(q2)
q3 = sympify(q3)
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 -
q3 ** 2,
2 * (q1 * q2 - q0 * q3),
2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3),
q0 ** 2 - q1 ** 2 +
q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)],
[2 * (q1 * q3 - q0 * q2),
2 * (q0 * q1 + q2 * q3),
q0 ** 2 - q1 ** 2 -
q2 ** 2 + q3 ** 2]]))
parent_orient = parent_orient.T
obj = super(QuaternionOrienter, cls).__new__(cls, q0, q1, q2, q3)
obj._q0 = q0
obj._q1 = q1
obj._q2 = q2
obj._q3 = q3
obj._parent_orient = parent_orient
return obj
    def __init__(self, q0, q1, q2, q3):
"""
Quaternion orientation orients the new CoordSys3D with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSys3D('N')
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> B = N.orient_new('B', (q_orienter, ))
"""
# Dummy initializer for docstrings
pass
@property
def q0(self):
return self._q0
@property
def q1(self):
return self._q1
@property
def q2(self):
return self._q2
@property
def q3(self):
return self._q3
def _rot(axis, angle):
"""DCM for simple axis 1, 2 or 3 rotations. """
if axis == 1:
return Matrix(rot_axis1(angle).T)
elif axis == 2:
return Matrix(rot_axis2(angle).T)
elif axis == 3:
return Matrix(rot_axis3(angle).T)
| kaushik94/sympy | sympy/vector/orienters.py | Python | bsd-3-clause | 11,694 |
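A short round-trip of the API documented above, following the same sympy.vector usage shown in the docstrings:

```python
from sympy import symbols
from sympy.vector import AxisOrienter, CoordSys3D

q = symbols('q')
N = CoordSys3D('N')
orienter = AxisOrienter(q, N.k)
R = orienter.rotation_matrix(N)   # 3x3 DCM as a function of q
B = N.orient_new('B', (orienter,))
```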
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import re
import shutil
import sys
import utils
BASENAME = "VTK"
GIT_REPO = "http://vtk.org/VTK.git"
GIT_TAG = "v5.8.0"
VTK_BASE_VERSION = "vtk-5.8"
# this patch does three things:
# 1. adds try/catch blocks to all python method calls in order
# to trap bad_alloc exceptions
# 2. implements my scheme for turning all VTK errors into Python exceptions
# by making use of a special output window class
# 3. gives up the GIL around all VTK calls. This is also necessary
# for 2 not to deadlock on multi-cores.
EXC_PATCH = "pyvtk580_tryexcept_and_pyexceptions.diff"
# fixes attributes in vtkproperty for shader use in python
VTKPRPRTY_PATCH = "vtkProperty_PyShaderVar.diff"
# recent segfault with vtk 5.6.1 and wxPython 2.8.11.0
# see here for more info:
# http://vtk.1045678.n5.nabble.com/wx-python-scripts-segfault-td1234471.html
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH = "wxvtkrwi_displayid_segfault.diff"
dependencies = ['CMake']
class VTK58(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
self.exc_patch_src = os.path.join(config.patches_dir, EXC_PATCH)
self.exc_patch_dst = os.path.join(config.archive_dir, EXC_PATCH)
self.vtkprprty_patch_filename = os.path.join(config.patches_dir,
VTKPRPRTY_PATCH)
self.wxvtkrwi_displayid_segfault_patch_filename = os.path.join(
config.patches_dir,
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH)
config.VTK_LIB = os.path.join(self.inst_dir, 'lib')
# whatever the case may be, we have to register VTK variables
if os.name == 'nt':
# on Win, inst/VTK/bin contains the so files
config.VTK_SODIR = os.path.join(self.inst_dir, 'bin')
# inst/VTK/lib/site-packages the VTK python package
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'site-packages')
else:
# on *ix, inst/VTK/lib contains DLLs
config.VTK_SODIR = os.path.join(
config.VTK_LIB, VTK_BASE_VERSION)
# on *ix, inst/lib/python2.5/site-packages contains the
# VTK python package
# sys.version is (2, 5, 0, 'final', 0)
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'python%d.%d/site-packages' % \
sys.version_info[0:2])
# this contains the VTK cmake config (same on *ix and Win)
config.VTK_DIR = os.path.join(config.VTK_LIB, VTK_BASE_VERSION)
def get(self):
if os.path.exists(self.source_dir):
utils.output("VTK already checked out, skipping step.")
else:
utils.goto_archive()
ret = os.system("git clone %s %s" % (GIT_REPO, BASENAME))
if ret != 0:
utils.error("Could not clone VTK repo. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("git checkout %s" % (GIT_TAG,))
if ret != 0:
utils.error("Could not checkout VTK %s. Fix and try again." % (GIT_TAG,))
if not os.path.exists(self.exc_patch_dst):
utils.output("Applying EXC patch")
# we do this copy so we can see if the patch has been done yet or not
shutil.copyfile(self.exc_patch_src, self.exc_patch_dst)
os.chdir(self.source_dir)
# default git-generated patch, so needs -p1
ret = os.system(
"%s -p1 < %s" % (config.PATCH, self.exc_patch_dst))
if ret != 0:
utils.error(
"Could not apply EXC patch. Fix and try again.")
# # VTKPRPRTY PATCH
# utils.output("Applying VTKPRPRTY patch")
# os.chdir(os.path.join(self.source_dir, 'Rendering'))
# ret = os.system(
# "%s -p0 < %s" % (config.PATCH, self.vtkprprty_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply VTKPRPRTY patch. Fix and try again.")
# # WXVTKRWI_DISPLAYID_SEGFAULT patch
# utils.output("Applying VTKWXRWI_DISPLAYID_SEGFAULT patch")
# os.chdir(self.source_dir)
# # default git-generated patch, so needs -p1
# ret = os.system(
# "%s -p1 < %s" % (config.PATCH,
# self.wxvtkrwi_displayid_segfault_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply WXVTKRWI_DISPLAYID_SEGFAULT patch. Fix and try again.")
def unpack(self):
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("VTK build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = "-DBUILD_SHARED_LIBS=ON " \
"-DBUILD_TESTING=OFF " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DVTK_USE_TK=NO " \
"-DVTK_USE_METAIO=ON " \
"-DVTK_USE_PARALLEL=ON " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " \
"-DVTK_WRAP_PYTHON=ON " % (self.inst_dir,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error("Could not configure VTK. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir,
'bin/libvtkWidgetsPython.so')
nt_file = os.path.join(self.build_dir, 'bin', config.BUILD_TARGET,
'vtkWidgetsPythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('VTK.sln')
if ret != 0:
utils.error("Error building VTK. Fix and try again.")
def install(self):
posix_file = os.path.join(self.inst_dir, 'bin/vtkpython')
nt_file = os.path.join(self.inst_dir, 'bin', 'vtkpython.exe')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already installed. Skipping build step.")
else:
# python 2.5.2 setup.py complains that this does not exist
# with VTK PV-3-2-1. This is only on installations with
# EasyInstall / Python Eggs, then the VTK setup.py uses
# EasyInstall and not standard distutils. gah!
# just tested with VTK 5.8.0 and Python 2.7.2
# it indeed installs VTK_PYTHON/VTK-5.8.0-py2.7.egg
# but due to the site.py and easy-install.pth magic in there,
# adding VTK_PYTHON to the PYTHONPATH still works. We can keep
# pip, yay!
if not os.path.exists(config.VTK_PYTHON):
os.makedirs(config.VTK_PYTHON)
os.chdir(self.build_dir)
# we save, set and restore the PP env variable, else
# stupid setuptools complains
save_env = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = config.VTK_PYTHON
ret = utils.make_command('VTK.sln', install=True)
os.environ['PYTHONPATH'] = save_env
if ret != 0:
utils.error("Could not install VTK. Fix and try again.")
# now do some surgery on VTKConfig.cmake and
# VTKLibraryDepends.cmake so builds of VTK-dependent libraries
# with only the DRE to link with Just Work(tm)
# on windows, we need to replace backslash with forward slash
# as that's the style used by the config files. On *ix mostly
# harmless
idp = re.sub(r'\\','/', config.inst_dir)
for fn in [os.path.join(config.VTK_DIR, 'VTKConfig.cmake'),
os.path.join(config.VTK_DIR, 'VTKLibraryDepends.cmake'),
os.path.join(config.VTK_DIR, 'VTKTargets-relwithdebinfo.cmake')]:
if os.path.exists(fn):
utils.re_sub_filter_file(
[(idp, '${VTK_INSTALL_PREFIX}/..')],
fn)
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
import vtk
return vtk.vtkVersion.GetVTKVersion()
| nagyistoce/devide.johannes | install_packages/ip_vtk58.py | Python | bsd-3-clause | 9,776 |
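For context, a sketch of the lifecycle this class is built around; the johannes driver's real entry point may differ:

```python
# pkg = VTK58()
# pkg.get()        # clone repo, check out v5.8.0, apply EXC patch
# pkg.configure()  # run cmake with the parameters assembled above
# pkg.build()      # compile (make / VTK.sln)
# pkg.install()    # install, then patch the VTK cmake config files
```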
""" Simple datasets to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from theano.compat.six.moves import xrange
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class ArangeDataset(DenseDesignMatrix):
"""
A dataset where example i is just the number i. Makes it easy to track
which sets of examples are visited.
Parameters
----------
num_examples : WRITEME
To see the other parameters, look at the DenseDesignMatrix class
documentation
"""
def __init__(self, num_examples, *args, **kwargs):
X = np.zeros((num_examples, 1))
X[:, 0] = np.arange(num_examples)
super(ArangeDataset, self).__init__(X, *args, **kwargs)
def random_dense_design_matrix(rng, num_examples, dim, num_classes):
"""
Creates a random dense design matrix that has class labels.
Parameters
----------
rng : numpy.random.RandomState
The random number generator used to generate the dataset.
num_examples : int
The number of examples to create.
dim : int
The number of features in each example.
num_classes : int
The number of classes to assign the examples to.
0 indicates that no class labels will be generated.
"""
X = rng.randn(num_examples, dim)
if num_classes:
Y = rng.randint(0, num_classes, (num_examples, 1))
y_labels = num_classes
else:
Y = None
y_labels = None
return DenseDesignMatrix(X=X, y=Y, y_labels=y_labels)
def random_one_hot_dense_design_matrix(rng, num_examples, dim, num_classes):
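    """Create a random dense design matrix with one-hot class labels."""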
X = rng.randn(num_examples, dim)
idx = rng.randint(0, num_classes, (num_examples, ))
Y = np.zeros((num_examples, num_classes))
for i in xrange(num_examples):
Y[i, idx[i]] = 1
return DenseDesignMatrix(X=X, y=Y)
def random_one_hot_topological_dense_design_matrix(rng,
num_examples,
shape,
channels,
axes,
num_classes):
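    """Create a random topological design matrix with one-hot labels."""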
dims = {'b': num_examples,
'c': channels}
for i, dim in enumerate(shape):
dims[i] = dim
shape = [dims[axis] for axis in axes]
X = rng.randn(*shape)
idx = rng.randint(0, num_classes, (num_examples,))
Y = np.zeros((num_examples, num_classes))
for i in xrange(num_examples):
Y[i, idx[i]] = 1
return DenseDesignMatrix(topo_view=X, axes=axes, y=Y)
| junbochen/pylearn2 | pylearn2/testing/datasets.py | Python | bsd-3-clause | 2,822 |
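A quick check of the helpers above (only numpy is assumed):

```python
import numpy as np

rng = np.random.RandomState(0)
ds = random_one_hot_dense_design_matrix(rng, num_examples=20, dim=5,
                                        num_classes=3)
assert ds.X.shape == (20, 5)
assert ds.y.shape == (20, 3)
assert np.all(ds.y.sum(axis=1) == 1)  # exactly one hot label per row
```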
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 30
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connect component, elsewhere
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
kernel function such the Gaussian (aka RBF) kernel of the euclidean
distanced ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only if after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| RomainBrault/scikit-learn | sklearn/cluster/spectral.py | Python | bsd-3-clause | 19,196 |
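A usage sketch on toy data where a center-based method alone would fail (two nested circles):

```python
from sklearn.datasets import make_circles
from sklearn.cluster import SpectralClustering

X, _ = make_circles(n_samples=200, factor=0.5, noise=0.05, random_state=0)
model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                           n_neighbors=10, assign_labels='discretize',
                           random_state=0)
labels = model.fit(X).labels_   # one label per sample, circles separated
```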
import sys
from operator import add
from pyspark import SparkContext
if __name__ == "__main__":
if len(sys.argv) < 3:
print >> sys.stderr, \
"Usage: PythonWordCount <master> <file>"
exit(-1)
sc = SparkContext(sys.argv[1], "PythonWordCount")
lines = sc.textFile(sys.argv[2], 1)
counts = lines.flatMap(lambda x: x.split(' ')) \
.map(lambda x: (x, 1)) \
.reduceByKey(add)
output = counts.collect()
for (word, count) in output:
print "%s : %i" % (word, count)
| koeninger/spark | python/examples/wordcount.py | Python | bsd-3-clause | 555 |
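A pure-Python restatement of what the flatMap/map/reduceByKey pipeline above computes (no Spark required; invocation from a Spark checkout would look roughly like `./bin/pyspark python/examples/wordcount.py local[2] README.md`, with master URL and input path as placeholders):

```python
from itertools import groupby

def local_wordcount(lines):
    """Pure-Python equivalent of the flatMap/map/reduceByKey pipeline."""
    words = sorted(w for line in lines for w in line.split(' ') if w)
    return [(w, sum(1 for _ in g)) for w, g in groupby(words)]

assert local_wordcount(["a b a", "b c"]) == [('a', 2), ('b', 2), ('c', 1)]
```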
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from googleapiclient.discovery import build
from google.oauth2 import service_account
class RealTimeReportingServer():
SCOPES = ['https://www.googleapis.com/auth/admin.reports.audit.readonly']
USER_EMAIL = 'admin@beyondcorp.bigr.name'
def create_reports_service(self, user_email):
"""Build and returns an Admin SDK Reports service object authorized with
the service accounts that act on behalf of the given user.
Args:
user_email: The email of the user. Needs permissions to access
the Admin APIs.
Returns:
Admin SDK reports service object.
"""
localDir = os.path.dirname(os.path.abspath(__file__))
filePath = os.path.join(localDir, 'service_accountkey.json')
credentials = service_account.Credentials.from_service_account_file(
filePath, scopes=self.SCOPES)
    # google.oauth2 credentials delegate via with_subject (create_delegated
    # belongs to the legacy oauth2client API).
    delegatedCreds = credentials.with_subject(user_email)
return build('admin', 'reports_v1', credentials=delegatedCreds)
def lookupevents(self, eventName, startTime, deviceId):
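    """Return True if a chrome activity named eventName after startTime
    references one of the given device ids."""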
containsEvent = False
reportService = self.create_reports_service(self.USER_EMAIL)
results = reportService.activities().list(
userKey='all',
applicationName='chrome',
customerId='C029rpj4z',
eventName=eventName,
startTime=startTime).execute()
activities = results.get('items', [])
for activity in activities:
for event in activity.get('events', []):
for parameter in event.get('parameters', []):
if parameter['name'] == 'DEVICE_ID' and \
parameter['value'] in deviceId:
containsEvent = True
break
return containsEvent
| chromium/chromium | chrome/test/enterprise/e2e/connector/realtime_reporting_bce/reporting_server.py | Python | bsd-3-clause | 1,846 |
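A hedged usage sketch; it requires service_accountkey.json next to the module, and the event name, timestamp (RFC 3339, per the Reports API), and device id below are placeholders:

```python
# server = RealTimeReportingServer()
# found = server.lookupevents(eventName='MALWARE_TRANSFER',
#                             startTime='2021-01-01T00:00:00.000Z',
#                             deviceId=['placeholder-device-id'])
```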
# -*- coding: utf-8 -*-
import django
SECRET_KEY = 'psst'
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ROOT_URLCONF = 'allauth.urls'
if django.VERSION >= (1, 8):
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
],
},
},
]
else:
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.amazon',
'allauth.socialaccount.providers.angellist',
'allauth.socialaccount.providers.baidu',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.bitly',
'allauth.socialaccount.providers.coinbase',
'allauth.socialaccount.providers.douban',
'allauth.socialaccount.providers.dropbox',
'allauth.socialaccount.providers.dropbox_oauth2',
'allauth.socialaccount.providers.evernote',
'allauth.socialaccount.providers.feedly',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.flickr',
'allauth.socialaccount.providers.foursquare',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.hubic',
'allauth.socialaccount.providers.instagram',
'allauth.socialaccount.providers.linkedin',
'allauth.socialaccount.providers.linkedin_oauth2',
'allauth.socialaccount.providers.mailru',
'allauth.socialaccount.providers.windowslive',
'allauth.socialaccount.providers.odnoklassniki',
'allauth.socialaccount.providers.openid',
'allauth.socialaccount.providers.orcid',
'allauth.socialaccount.providers.paypal',
'allauth.socialaccount.providers.persona',
'allauth.socialaccount.providers.soundcloud',
'allauth.socialaccount.providers.spotify',
'allauth.socialaccount.providers.stackexchange',
'allauth.socialaccount.providers.tumblr',
'allauth.socialaccount.providers.twitch',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.vimeo',
'allauth.socialaccount.providers.weibo',
'allauth.socialaccount.providers.vk',
'allauth.socialaccount.providers.xing',
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = '/tmp/' # Dummy
STATIC_URL = '/static/'
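# --- Usage sketch (hedged addition, not part of the original file) ---
# These settings are typically consumed by the Django test runner, e.g.
# (assuming the repository root is on PYTHONPATH):
#
#   django-admin.py test allauth --settings=test_settings
#
# or programmatically:
#
#   import os, django
#   os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings')
#   django.setup()  # Django >= 1.7 only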
| tigeraniya/django-allauth | test_settings.py | Python | mit | 4,239 |
import os
import numpy as np
def save_weights(layers, weights_dir, epoch):
for idx in range(len(layers)):
if hasattr(layers[idx], 'W'):
layers[idx].W.save_weight(
weights_dir, 'W' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'W0'):
layers[idx].W0.save_weight(
weights_dir, 'W0' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'W1'):
layers[idx].W1.save_weight(
weights_dir, 'W1' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'b'):
layers[idx].b.save_weight(
weights_dir, 'b' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'b0'):
layers[idx].b0.save_weight(
weights_dir, 'b0' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'b1'):
layers[idx].b1.save_weight(
weights_dir, 'b1' + '_' + str(idx) + '_' + str(epoch))
def load_weights(layers, weights_dir, epoch):
for idx in range(len(layers)):
if hasattr(layers[idx], 'W'):
layers[idx].W.load_weight(
weights_dir, 'W' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'W0'):
layers[idx].W0.load_weight(
weights_dir, 'W0' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'W1'):
layers[idx].W1.load_weight(
weights_dir, 'W1' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'b'):
layers[idx].b.load_weight(
weights_dir, 'b' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'b0'):
layers[idx].b0.load_weight(
weights_dir, 'b0' + '_' + str(idx) + '_' + str(epoch))
if hasattr(layers[idx], 'b1'):
layers[idx].b1.load_weight(
weights_dir, 'b1' + '_' + str(idx) + '_' + str(epoch))
def save_momentums(vels, weights_dir, epoch):
for ind in range(len(vels)):
np.save(os.path.join(weights_dir, 'mom_' + str(ind) + '_' + str(epoch)),
vels[ind].get_value())
def load_momentums(vels, weights_dir, epoch):
for ind in range(len(vels)):
vels[ind].set_value(np.load(os.path.join(
weights_dir, 'mom_' + str(ind) + '_' + str(epoch) + '.npy')))
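# --- Usage sketch (hedged addition, not part of the original file) ---
# The layer objects are an assumption here: anything whose W/W0/W1/b/b0/b1
# parameters expose save_weight(dir, name) / load_weight(dir, name) works,
# and vels must be Theano-shared-like objects with get_value()/set_value().
#
#   layers = model.layers                          # hypothetical model
#   save_weights(layers, './weights', epoch=10)
#   load_weights(layers, './weights', epoch=10)
#   save_momentums(vels, './weights', epoch=10)
#   load_momentums(vels, './weights', epoch=10)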
| myt00seven/svrg | para_gpu/tools.py | Python | mit | 2,391 |
#!/bin/env python
import os, sys
def usage():
print "%s WORK_DIR [num_results]" % sys.argv[0]
sys.exit(1)
try:
work_dir = sys.argv[1]
except:
print "you must specify your DenyHosts WORK_DIR"
usage()
try:
num = int(sys.argv[2])
except:
num = 10
fname = os.path.join(work_dir, "users-invalid")
try:
fp = open(fname, "r")
except:
print fname, "does not exist"
sys.exit(1)
d = {}
for line in fp:
try:
foo = line.split(":")
username = foo[0]
attempts = int(foo[1])
# timestamp = foo[2].strip()
except:
continue
l = d.get(attempts, [])
l.append(username)
d[attempts] = l
fp.close()
keys = d.keys()
keys.sort()
keys.reverse()
i = 0
for key in keys:
l = d.get(key)
for username in l:
i += 1
print username
if i >= num: break
if i >= num: break
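# --- Usage sketch (hedged addition, not part of the original file) ---
# Run against a DenyHosts work directory (the path is hypothetical):
#
#   python restricted_from_invalid.py /var/lib/denyhosts 20
#
# prints the usernames behind the 20 highest invalid-login attempt counts.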
| mobiledayadmin/DenyHosts | scripts/restricted_from_invalid.py | Python | gpl-2.0 | 884 |
"""
Tests the ``create_dot_application`` management command.
"""
from __future__ import absolute_import, unicode_literals
from django.core.management import call_command
from django.test import TestCase
from oauth2_provider.models import get_application_model
from student.tests.factories import UserFactory
from ..create_dot_application import Command
Application = get_application_model()
class TestCreateDotApplication(TestCase):
"""
Tests the ``create_dot_application`` management command.
"""
def setUp(self):
super(TestCreateDotApplication, self).setUp()
self.user = UserFactory.create()
def tearDown(self):
super(TestCreateDotApplication, self).tearDown()
Application.objects.filter(user=self.user).delete()
def test_create_dot_application(self):
call_command(Command(), 'testing_application', self.user.username)
apps = Application.objects.filter(name='testing_application')
self.assertEqual(1, len(apps))
application = apps[0]
self.assertEqual('testing_application', application.name)
self.assertEqual(self.user, application.user)
self.assertEqual(Application.GRANT_CLIENT_CREDENTIALS, application.authorization_grant_type)
self.assertEqual(Application.CLIENT_CONFIDENTIAL, application.client_type)
self.assertEqual('', application.redirect_uris)
# When called a second time with the same arguments, the command should
# exit gracefully without creating a second application.
call_command(Command(), 'testing_application', self.user.username)
apps = Application.objects.filter(name='testing_application')
self.assertEqual(1, len(apps))
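# --- Usage sketch (hedged addition, not part of the original file) ---
# Outside the test suite, the command under test would be invoked roughly as
# (the manage.py wrapper and system name are assumptions about the platform):
#
#   ./manage.py lms create_dot_application testing_application staff_user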
| ahmedaljazzar/edx-platform | openedx/core/djangoapps/oauth_dispatch/management/commands/tests/test_create_dot_application.py | Python | agpl-3.0 | 1,724 |
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class LineCountBear(LocalBear):
LANGUAGES = {'All'}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
def run(self, filename, file, max_lines_per_file: int):
"""
Count the number of lines in a file and ensure that they are
smaller than a given size.
:param max_lines_per_file: Number of lines allowed per file.
"""
file_length = len(file)
if file_length > max_lines_per_file:
yield Result.from_values(
origin=self,
message=('This file had {count} lines, which is {extra} '
'lines more than the maximum limit specified.'
.format(count=file_length,
extra=file_length-max_lines_per_file)),
severity=RESULT_SEVERITY.NORMAL,
file=filename)
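# --- Usage sketch (hedged addition, not part of the original file) ---
# A minimal .coafile section that would run this bear (values are examples):
#
#   [all]
#   files = **/*.py
#   bears = LineCountBear
#   max_lines_per_file = 1000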
| Asnelchristian/coala-bears | bears/general/LineCountBear.py | Python | agpl-3.0 | 1,107 |
from art_helpers.instructions_helper import InstructionsHelper, InstructionsHelperException
from art_helpers.program_helper import ProgramHelper, ProgramHelperException
from art_helpers.art_robot_helper import ArtRobotArmHelper, ArtRobotHelper, UnknownRobot,\
RobotParametersNotOnParameterServer
from art_helpers.interface_state_manager import InterfaceStateManager
from art_helpers.calibration_helper import ArtCalibrationHelper
import rospy
def array_from_param(param, target_type=str, expected_length=None):
tmp = []
for s in rospy.get_param(param).split(","):
tmp.append(target_type(s.strip()))
if expected_length is not None:
assert len(tmp) == expected_length
return tmp
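# --- Usage sketch (hedged addition, not part of the original file) ---
# Assuming a ROS parameter /art/robot/arms set to the string "left, right":
#
#   arms = array_from_param('/art/robot/arms')               # ['left', 'right']
#   lims = array_from_param('/art/joint_limits', float, 3)   # e.g. [0.1, 0.2, 0.3]
#
# Both parameter names above are hypothetical.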
| robofit/ar-table-itable | art_helpers/src/art_helpers/__init__.py | Python | lgpl-2.1 | 720 |
''' Test bug 389: http://bugs.openbossa.org/show_bug.cgi?id=389'''
import sys
import unittest
from helper import UsesQApplication
from PySide import QtCore, QtGui
class BugTest(UsesQApplication):
def testCase(self):
s = QtGui.QWidget().style()
i = s.standardIcon(QtGui.QStyle.SP_TitleBarMinButton)
self.assertEqual(type(i), QtGui.QIcon)
if __name__ == '__main__':
unittest.main()
| enthought/pyside | tests/QtGui/bug_389.py | Python | lgpl-2.1 | 414 |
# -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango.display
class AutoStereoDisplay(avango.display.Display):
def __init__(self, inspector, options):
super(AutoStereoDisplay, self).__init__("AutoStereoDisplay", inspector)
window = self.make_window(0, 0, 1200, 1600, 0.33, 0.43, True)
window.Name.value = ""
self.add_window(window, avango.osg.make_trans_mat(0, 1.7, -0.7), 0)
user = avango.display.nodes.User()
user.Matrix.value = avango.osg.make_trans_mat(avango.osg.Vec3(0., 1.7, 0.))
self.add_user(user)
| jakobharlan/avango | avango-display/python/avango/display/setups/AutoStereoDisplay.py | Python | lgpl-3.0 | 2,055 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import * # noqa
# don't use a unicode string
localeID = 'ru_RU'
dateSep = ['-', '.']
timeSep = [':']
meridian = []
usesMeridian = False
uses24 = True
Weekdays = [
'понедельник', 'вторник', 'среда', 'четверг',
'пятница', 'суббота', 'воскресенье',
]
shortWeekdays = [
'пн', 'вт', 'ср', 'чт', 'пт', 'сб', 'вс',
]
# the library does not know how to conjugate words
Months = [
'января', 'февраля', 'марта', 'апреля', 'мая', 'июня', 'июля',
'августа', 'сентября', 'октября', 'ноября', 'декабря',
]
shortMonths = [
'янв', 'фев', 'мрт', 'апр', 'май', 'июн',
'июл', 'авг', 'сен', 'окт', 'нбр', 'дек',
]
dateFormats = {
'full': 'EEEE, dd MMMM yyyy',
'long': 'dd MMMM yyyy',
'medium': 'dd-MM-yyyy',
'short': 'dd-MM-yy',
}
timeFormats = {
'full': 'HH:mm:ss v',
'long': 'HH:mm:ss z',
'medium': 'HH:mm:ss',
'short': 'HH:mm',
}
dp_order = ['d', 'm', 'y']
decimal_mark = '.'
units = {
'seconds': ['секунда', 'секунды', 'секунд', 'сек', 'с'],
'minutes': ['минута', 'минуты', 'минут', 'мин', 'м'],
'hours': ['час', 'часов', 'часа', 'ч'],
'days': ['день', 'дней', 'д'],
'weeks': ['неделя', 'недели', 'н'],
'months': ['месяц', 'месяца', 'мес'],
'years': ['год', 'года', 'годы', 'г'],
}
re_values = re_values.copy()
re_values.update({
'specials': 'om',
'timeseparator': ':',
'rangeseparator': '-',
'daysuffix': 'ого|ой|ий|тье',
'qunits': 'д|мес|г|ч|н|м|с',
'now': ['сейчас'],
})
Modifiers = {
'после': 1,
'назад': -1,
'предыдущий': -1,
'последний': -1,
'далее': 1,
'ранее': -1,
}
dayOffsets = {
'завтра': 1,
'сегодня': 0,
'вчера': -1,
'позавчера': -2,
'послезавтра': 2,
}
re_sources = {
'полдень': {'hr': 12, 'mn': 0, 'sec': 0},
'день': {'hr': 13, 'mn': 0, 'sec': 0},
'обед': {'hr': 12, 'mn': 0, 'sec': 0},
'утро': {'hr': 6, 'mn': 0, 'sec': 0},
'завтрак': {'hr': 8, 'mn': 0, 'sec': 0},
'ужин': {'hr': 19, 'mn': 0, 'sec': 0},
'вечер': {'hr': 18, 'mn': 0, 'sec': 0},
'полночь': {'hr': 0, 'mn': 0, 'sec': 0},
'ночь': {'hr': 21, 'mn': 0, 'sec': 0},
}
small = {
'ноль': 0,
'один': 1,
'два': 2,
'три': 3,
'четыре': 4,
'пять': 5,
'шесть': 6,
'семь': 7,
'восемь': 8,
'девять': 9,
'десять': 10,
'одиннадцать': 11,
'двенадцать': 12,
'тринадцать': 13,
'четырнадцать': 14,
'пятнадцать': 15,
'шестнадцать': 16,
'семнадцать': 17,
'восемнадцать': 18,
'девятнадцать': 19,
'двадцать': 20,
'тридцать': 30,
'сорок': 40,
'пятьдесят': 50,
'шестьдесят': 60,
'семьдесят': 70,
'восемьдесят': 80,
'девяносто': 90,
}
numbers = {
'ноль': 0,
'один': 1,
'два': 2,
'три': 3,
'четыре': 4,
'пять': 5,
'шесть': 6,
'семь': 7,
'восемь': 8,
'девять': 9,
'десять': 10,
'одиннадцать': 11,
'двенадцать': 12,
'тринадцать': 13,
'четырнадцать': 14,
'пятнадцать': 15,
'шестнадцать': 16,
'семнадцать': 17,
'восемнадцать': 18,
'девятнадцать': 19,
'двадцать': 20,
}
magnitude = {
'тысяча': 1000,
'миллион': 1000000,
'миллиард': 1000000000,
'триллион': 1000000000000,
'квадриллион': 1000000000000000,
'квинтиллион': 1000000000000000000,
'секстиллион': 1000000000000000000000,
'септиллион': 1000000000000000000000000,
'октиллион': 1000000000000000000000000000,
'нониллион': 1000000000000000000000000000000,
'дециллион': 1000000000000000000000000000000000,
}
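# --- Usage sketch (hedged addition, not part of the original file) ---
# A minimal sketch of selecting this locale through parsedatetime's public
# API (assuming the upstream pdt_locales package layout):
#
#   import parsedatetime
#   consts = parsedatetime.Constants(localeID='ru_RU', usePyICU=False)
#   cal = parsedatetime.Calendar(consts)
#   cal.parse('завтра')   # -> (time struct for tomorrow, parse-status flag)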
| garlick/flux-core | src/bindings/python/flux/utils/parsedatetime/pdt_locales/ru_RU.py | Python | lgpl-3.0 | 4,577 |
'''OpenGL extension NV.read_depth_stencil
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.read_depth_stencil to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/read_depth_stencil.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.read_depth_stencil import *
from OpenGL.raw.GLES2.NV.read_depth_stencil import _EXTENSION_NAME
def glInitReadDepthStencilNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GLES2/NV/read_depth_stencil.py | Python | lgpl-3.0 | 785 |
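# --- Usage sketch (hedged addition, not part of the original file) ---
# Extension queries need a current GL(ES) context; given one, availability
# can be probed before relying on depth/stencil reads:
#
#   if glInitReadDepthStencilNV():
#       pass  # glReadPixels may read depth/stencil on this driver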
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Install necessary 3rd party Python modules to run all tests.
# This script is supposed to be used in a continuous integration system:
# https://jenkins.shiningpanda.com/pyinstaller/
# Python there is mostly 64bit. Only Python 2.4 is 32bit on Windows 7.
import glob
import optparse
import os
import platform
import sys
# easy_install command used in a Python script.
from setuptools.command import easy_install
# Expand PYTHONPATH with PyInstaller package to support running without
# installation -- only if not running in a virtualenv.
if not hasattr(sys, 'real_prefix'):
pyi_home = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0, pyi_home)
from PyInstaller.compat import is_py25, is_py26
PYVER = '.'.join([str(x) for x in sys.version_info[0:2]])
def py_arch():
"""
.exe installers of Python modules contain architecture name in filename.
"""
mapping = {'32bit': 'win32', '64bit': 'win-amd64'}
arch = platform.architecture()[0]
return mapping[arch]
_PACKAGES = {
'docutils': ['docutils'],
'jinja2': ['jinja2'],
'sphinx': ['sphinx'],
'pytz': ['pytz'],
'IPython': ['IPython'],
# 'modulename': 'pypi_name_or_url_or_path'
'MySQLdb': ['MySQL-python-*%s-py%s.exe' % (py_arch(), PYVER)],
'numpy': ['numpy-unoptimized-*%s-py%s.exe' % (py_arch(), PYVER)],
'PIL': ['PIL-*%s-py%s.exe' % (py_arch(), PYVER)],
'psycopg2': ['psycopg2-*%s-py%s.exe' % (py_arch(), PYVER)],
#'pycrypto': ['pycrypto'],
'pyodbc': ['pyodbc'],
#'simplejson': ['simplejson'],
'sqlalchemy': ['SQLAlchemy-*%s-py%s.exe' % (py_arch(), PYVER)],
'wx': ['wxPython-common-*%s-py%s.exe' % (py_arch(), PYVER),
'wxPython-2*%s-py%s.exe' % (py_arch(), PYVER)],
# PyWin32 is installed on ShiningPanda hosting.
'win32api': ['http://downloads.sourceforge.net/project/pywin32/pywin32/Build%%20217/pywin32-217.%s-py%s.exe' %
(py_arch(), PYVER)],
}
_PY_VERSION = {
'MySQLdb': is_py26,
'numpy': is_py26,
'PIL': is_py26,
'psycopg2': is_py26,
'simplejson': is_py25,
# Installers are available only for Python 2.6/2.7.
'wx': is_py26,
}
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--download-dir',
help='Directory with maually downloaded python modules.'
)
opts, _ = parser.parse_args()
# Install packages.
for k, v in _PACKAGES.items():
# Test python version for module.
if k in _PY_VERSION:
            # _PY_VERSION values are feature flags (True when the running
            # Python is new enough for the module); skip the module otherwise.
            if not _PY_VERSION[k]:
                continue
try:
__import__(k)
print 'Already installed... %s' % k
# Module is not available - install it.
except ImportError:
# If not url or module name then look for installers in download area.
if not v[0].startswith('http') and v[0].endswith('exe'):
files = []
# Try all file patterns.
for pattern in v:
pattern = os.path.join(opts.download_dir, pattern)
files += glob.glob(pattern)
# No file with that pattern was not found - skip it.
if not files:
print 'Skipping module... %s' % k
continue
# Full path to installers in download directory.
v = files
print 'Installing module... %s' % k
# Some modules might require several .exe files to install.
for f in v:
print ' %s' % f
# Use --no-deps ... installing module dependencies might fail
# because easy_install tries to install the same module from
# PYPI from source code and if fails because of C code that
# that needs to be compiled.
try:
easy_install.main(['--no-deps', '--always-unzip', f])
except Exception:
print ' %s installation failed' % k
if __name__ == '__main__':
main()
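# --- Usage sketch (hedged addition, not part of the original file) ---
# Typical CI invocation, pointing at a directory of pre-downloaded .exe
# installers (the path is hypothetical):
#
#   python setupenv_windows.py --download-dir C:\ci\downloads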
| NullScope/BorderlessStone | tests/setupenv_windows.py | Python | lgpl-3.0 | 4,510 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import urllib
from copy import deepcopy
from datetime import date
from logging import getLogger
from urlparse import urljoin
from sqlalchemy import and_
from ggrc import db
from ggrc import utils
from ggrc.models.revision import Revision
from ggrc.notifications import data_handlers
from ggrc.utils import merge_dicts, get_url_root
from ggrc_basic_permissions.models import Role, UserRole
from ggrc_workflows.models import Cycle
from ggrc_workflows.models import CycleTaskGroupObjectTask
from ggrc_workflows.models import Workflow
# pylint: disable=invalid-name
logger = getLogger(__name__)
"""
exposed functions
get_cycle_data,
get_workflow_data,
get_cycle_task_data,
"""
def get_cycle_created_task_data(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task:
logger.warning(
'%s for notification %s not found.',
notification.object_type, notification.id)
return {}
cycle_task_group = cycle_task.cycle_task_group
cycle = cycle_task_group.cycle
force = cycle.workflow.notify_on_change
task_assignee = data_handlers.get_person_dict(cycle_task.contact)
task_group_assignee = data_handlers.get_person_dict(cycle_task_group.contact)
workflow_owners = get_workflow_owners_dict(cycle.context_id)
task = {
cycle_task.id: get_cycle_task_dict(cycle_task)
}
result = {}
assignee_data = {
task_assignee['email']: {
"user": task_assignee,
"force_notifications": {
notification.id: force
},
"cycle_data": {
cycle.id: {
"my_tasks": deepcopy(task)
}
}
}
}
tg_assignee_data = {
task_group_assignee['email']: {
"user": task_group_assignee,
"force_notifications": {
notification.id: force
},
"cycle_data": {
cycle.id: {
"my_task_groups": {
cycle_task_group.id: deepcopy(task)
}
}
}
}
}
for workflow_owner in workflow_owners.itervalues():
wf_owner_data = {
workflow_owner['email']: {
"user": workflow_owner,
"force_notifications": {
notification.id: force
},
"cycle_data": {
cycle.id: {
"cycle_tasks": deepcopy(task)
}
}
}
}
result = merge_dicts(result, wf_owner_data)
return merge_dicts(result, assignee_data, tg_assignee_data)
def get_cycle_task_due(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task:
logger.warning(
'%s for notification %s not found.',
notification.object_type, notification.id)
return {}
if not cycle_task.contact:
logger.warning(
'Contact for cycle task %s not found.',
notification.object_id)
return {}
notif_name = notification.notification_type.name
due = "due_today" if notif_name == "cycle_task_due_today" else "due_in"
force = cycle_task.cycle_task_group.cycle.workflow.notify_on_change
return {
cycle_task.contact.email: {
"user": data_handlers.get_person_dict(cycle_task.contact),
"force_notifications": {
notification.id: force
},
due: {
cycle_task.id: get_cycle_task_dict(cycle_task)
}
}
}
def get_all_cycle_tasks_completed_data(notification, cycle):
workflow_owners = get_workflow_owners_dict(cycle.context_id)
force = cycle.workflow.notify_on_change
result = {}
for workflow_owner in workflow_owners.itervalues():
wf_data = {
workflow_owner['email']: {
"user": workflow_owner,
"force_notifications": {
notification.id: force
},
"all_tasks_completed": {
cycle.id: get_cycle_dict(cycle)
}
}
}
result = merge_dicts(result, wf_data)
return result
def get_cycle_created_data(notification, cycle):
if not cycle.is_current:
return {}
manual = notification.notification_type.name == "manual_cycle_created"
force = cycle.workflow.notify_on_change
result = {}
for user_role in cycle.workflow.context.user_roles:
person = user_role.person
result[person.email] = {
"user": data_handlers.get_person_dict(person),
"force_notifications": {
notification.id: force
},
"cycle_started": {
cycle.id: get_cycle_dict(cycle, manual)
}
}
return result
def get_cycle_data(notification):
cycle = get_object(Cycle, notification.object_id)
if not cycle:
return {}
notification_name = notification.notification_type.name
if notification_name in ["manual_cycle_created", "cycle_created"]:
return get_cycle_created_data(notification, cycle)
elif notification_name == "all_cycle_tasks_completed":
return get_all_cycle_tasks_completed_data(notification, cycle)
return {}
def get_cycle_task_declined_data(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task or not cycle_task.contact:
logger.warning(
'%s for notification %s not found.',
notification.object_type, notification.id)
return {}
force = cycle_task.cycle_task_group.cycle.workflow.notify_on_change
return {
cycle_task.contact.email: {
"user": data_handlers.get_person_dict(cycle_task.contact),
"force_notifications": {
notification.id: force
},
"task_declined": {
cycle_task.id: get_cycle_task_dict(cycle_task)
}
}
}
def get_cycle_task_data(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task or not cycle_task.cycle_task_group.cycle.is_current:
return {}
notification_name = notification.notification_type.name
if notification_name in ["manual_cycle_created", "cycle_created"]:
return get_cycle_created_task_data(notification)
elif notification_name == "cycle_task_declined":
return get_cycle_task_declined_data(notification)
elif notification_name in ["cycle_task_due_in",
"one_time_cycle_task_due_in",
"weekly_cycle_task_due_in",
"monthly_cycle_task_due_in",
"quarterly_cycle_task_due_in",
"annually_cycle_task_due_in",
"cycle_task_due_today"]:
return get_cycle_task_due(notification)
return {}
def get_workflow_starts_in_data(notification, workflow):
if workflow.status != "Active":
return {}
if (not workflow.next_cycle_start_date or
workflow.next_cycle_start_date < date.today()):
return {} # this can only be if the cycle has successfully started
result = {}
workflow_owners = get_workflow_owners_dict(workflow.context_id)
force = workflow.notify_on_change
for user_roles in workflow.context.user_roles:
wf_person = user_roles.person
result[wf_person.email] = {
"user": data_handlers.get_person_dict(wf_person),
"force_notifications": {
notification.id: force
},
"cycle_starts_in": {
workflow.id: {
"workflow_owners": workflow_owners,
"workflow_url": get_workflow_url(workflow),
"start_date": workflow.next_cycle_start_date,
"start_date_statement": utils.get_digest_date_statement(
workflow.next_cycle_start_date, "start", True),
"custom_message": workflow.notify_custom_message,
"title": workflow.title,
}
}
}
return result
def get_cycle_start_failed_data(notification, workflow):
if workflow.status != "Active":
return {}
if (not workflow.next_cycle_start_date or
workflow.next_cycle_start_date >= date.today()):
return {} # this can only be if the cycle has successfully started
result = {}
workflow_owners = get_workflow_owners_dict(workflow.context_id)
force = workflow.notify_on_change
for wf_owner in workflow_owners.itervalues():
result[wf_owner["email"]] = {
"user": wf_owner,
"force_notifications": {
notification.id: force
},
"cycle_start_failed": {
workflow.id: {
"workflow_owners": workflow_owners,
"workflow_url": get_workflow_url(workflow),
"start_date": workflow.next_cycle_start_date,
"start_date_statement": utils.get_digest_date_statement(
workflow.next_cycle_start_date, "start", True),
"custom_message": workflow.notify_custom_message,
"title": workflow.title,
}
}
}
return result
def get_workflow_data(notification):
workflow = get_object(Workflow, notification.object_id)
if not workflow:
return {}
if workflow.frequency == "one_time":
# one time workflows get cycles manually created and that triggers
# the instant notification.
return {}
if "_workflow_starts_in" in notification.notification_type.name:
return get_workflow_starts_in_data(notification, workflow)
if "cycle_start_failed" == notification.notification_type.name:
return get_cycle_start_failed_data(notification, workflow)
return {}
def get_object(obj_class, obj_id):
result = db.session.query(obj_class).filter(obj_class.id == obj_id)
if result.count() == 1:
return result.one()
return None
def get_workflow_owners_dict(context_id):
owners = db.session.query(UserRole).join(Role).filter(
and_(UserRole.context_id == context_id,
Role.name == "WorkflowOwner")).all()
return {user_role.person.id: data_handlers.get_person_dict(user_role.person)
for user_role in owners}
def _get_object_info_from_revision(revision, known_type):
""" returns type and id of the searched object, if we have one part of
the relationship known.
"""
object_type = revision.destination_type \
if revision.source_type == known_type \
else revision.source_type
object_id = revision.destination_id if \
revision.source_type == known_type \
else revision.source_id
return object_type, object_id
def get_cycle_task_dict(cycle_task):
object_titles = []
# every object should have a title or at least a name like person object
for related_object in cycle_task.related_objects:
object_titles.append(getattr(related_object, "title", "") or
getattr(related_object, "name", "") or
u"Untitled object")
# related objects might have been deleted or unmapped,
# check the revision history
deleted_relationships_sources = db.session.query(Revision).filter(
Revision.resource_type == "Relationship",
Revision.action == "deleted",
Revision.source_type == "CycleTaskGroupObjectTask",
Revision.source_id == cycle_task.id
)
deleted_relationships_destinations = db.session.query(Revision).filter(
Revision.resource_type == "Relationship",
Revision.action == "deleted",
Revision.destination_type == "CycleTaskGroupObjectTask",
Revision.destination_id == cycle_task.id
)
deleted_relationships = deleted_relationships_sources.union(
deleted_relationships_destinations).all()
for deleted_relationship in deleted_relationships:
removed_object_type, removed_object_id = _get_object_info_from_revision(
deleted_relationship, "CycleTaskGroupObjectTask")
object_data = db.session.query(Revision).filter(
Revision.resource_type == removed_object_type,
Revision.resource_id == removed_object_id,
).order_by(Revision.id.desc()).first()
object_titles.append(
u"{} [removed from task]".format(object_data.content["display_name"])
)
# the filter expression to be included in the cycle task's URL and
# automatically applied when user visits it
filter_exp = u"id=" + unicode(cycle_task.cycle_id)
return {
"title": cycle_task.title,
"related_objects": object_titles,
"end_date": cycle_task.end_date.strftime("%m/%d/%Y"),
"due_date_statement": utils.get_digest_date_statement(
cycle_task.end_date, "due"),
"cycle_task_url": get_cycle_task_url(cycle_task, filter_exp=filter_exp),
}
def get_cycle_dict(cycle, manual=False):
workflow_owners = get_workflow_owners_dict(cycle.context_id)
return {
"manually": manual,
"custom_message": cycle.workflow.notify_custom_message,
"cycle_title": cycle.title,
"workflow_owners": workflow_owners,
"cycle_url": get_cycle_url(cycle),
}
def get_workflow_url(workflow):
url = "workflows/{}#current_widget".format(workflow.id)
return urljoin(get_url_root(), url)
def get_cycle_task_url(cycle_task, filter_exp=u""):
if filter_exp:
filter_exp = u"?filter=" + urllib.quote(filter_exp)
url = (u"/workflows/{workflow_id}"
u"{filter_exp}"
u"#current_widget/cycle/{cycle_id}"
u"/cycle_task_group/{cycle_task_group_id}"
u"/cycle_task_group_object_task/{cycle_task_id}").format(
workflow_id=cycle_task.cycle_task_group.cycle.workflow.id,
filter_exp=filter_exp,
cycle_id=cycle_task.cycle_task_group.cycle.id,
cycle_task_group_id=cycle_task.cycle_task_group.id,
cycle_task_id=cycle_task.id,
)
return urljoin(get_url_root(), url)
def get_cycle_url(cycle):
url = "workflows/{workflow_id}#current_widget/cycle/{cycle_id}".format(
workflow_id=cycle.workflow.id,
cycle_id=cycle.id,
)
return urljoin(get_url_root(), url)
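# --- Usage sketch (hedged addition, not part of the original file) ---
# These handlers are keyed off a Notification row; a minimal sketch, assuming
# `notification` references a Cycle object:
#
#   data = get_cycle_data(notification)
#   # -> {email: {"user": {...},
#   #             "force_notifications": {notification.id: bool},
#   #             "cycle_started": {cycle.id: {...}}}}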
| plamut/ggrc-core | src/ggrc_workflows/notification/data_handler.py | Python | apache-2.0 | 14,000 |