| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null ⌀) |
|---|---|---|---|---|
IronLanguages/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/test/sample_doctest.py
|
203
|
"""This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
    """
    >>> 2+2
    5

    >>> 2+2
    4
    """

def bar():
    """
    >>> 2+2
    4
    """

def test_silly_setup():
    """
    >>> import test.test_doctest
    >>> test.test_doctest.sillySetup
    True
    """

def w_blank():
    """
    >>> if 1:
    ...     print('a')
    ...     print()
    ...     print('b')
    a
    <BLANKLINE>
    b
    """

x = 1

def x_is_one():
    """
    >>> x
    1
    """

def y_is_one():
    """
    >>> y
    1
    """

__test__ = {'good': """
                    >>> 42
                    42
                    """,
            'bad': """
                    >>> 42
                    666
                    """,
            }

def test_suite():
    import doctest
    return doctest.DocTestSuite()
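
# A minimal runner sketch (not part of the original file, assuming it is saved
# as a stand-alone module): collecting the suite above with unittest reports
# the fixed pass/fail counts that test_doctest relies on; several examples
# fail by design.
if __name__ == '__main__':
    import unittest
    unittest.TextTestRunner(verbosity=2).run(test_suite())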
|
gabrielbdsantos/BJ_Sickrage
|
refs/heads/master
|
dailysearch_cron.py
|
1
|
#!/usr/bin/env python
import argparse
import json
import urllib
def main(url, port, apiKey):
    baseUrl = "http://" + url + ":" + port
    backlog = urllib.urlopen(
        "{}/api/{}/?cmd=backlog".format(baseUrl, apiKey)
    )
    jsonBacklog = json.loads(backlog.read())
    for tvshow in jsonBacklog['data']:
        indexerid = tvshow['indexerid']
        episodes = tvshow['episodes']
        for episode in episodes:
            season = episode['season']
            episodeNumber = episode['episode']
            # Trigger a search for every backlogged episode.
            urllib.urlopen(
                "{}/api/{}/?cmd=episode.search&indexerid={}"
                "&season={}&episode={}".format(
                    baseUrl,
                    apiKey,
                    indexerid,
                    season,
                    episodeNumber,
                )
            )
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Execute Sickrage's Daily Search",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        'apiKey',
        type=str,
        help="Sickrage api key"
    )
    parser.add_argument(
        '-u',
        '--url',
        type=str,
        default="localhost",
        help="Sickrage Url",
    )
    parser.add_argument(
        '-p',
        '--port',
        type=str,
        default="8081",
        help="Sickrage port"
    )
    args = parser.parse_args()
    main(args.url, args.port, args.apiKey)
|
GiovanniConserva/TestDeploy
|
refs/heads/master
|
venv/Lib/site-packages/pip/commands/completion.py
|
143
|
from __future__ import absolute_import
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
"""}
class CompletionCommand(Command):
"""A helper command to be used for command completion."""
name = 'completion'
summary = 'A helper command used for command completion'
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
cmd_opts.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
)
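
# Hedged usage sketch (not part of pip): this command is normally invoked from
# the shell, e.g. ``pip completion --bash >> ~/.bashrc``. The equivalent
# programmatic call below simply shells out to pip and prints the bash snippet
# from COMPLETION_SCRIPTS; it assumes ``python -m pip`` is available.
if __name__ == '__main__':
    import subprocess
    import sys
    subprocess.call([sys.executable, '-m', 'pip', 'completion', '--bash'])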
|
JianfuLi/shadowsocks
|
refs/heads/master
|
tests/graceful_server.py
|
977
|
#!/usr/bin/python
import socket
if __name__ == '__main__':
    s = socket.socket()
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('127.0.0.1', 8001))
    s.listen(1024)
    c = None
    while True:
        c = s.accept()
|
acsone/project
|
refs/heads/8.0
|
service_desk/project.py
|
23
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ProjectProject(orm.Model):
_inherit = 'project.project'
_columns = {
'use_analytic_account': fields.selection(
[('no', 'No'), ('yes', 'Optional'), ('req', 'Required')],
'Use Analytic Account'),
}
_defaults = {
'use_analytic_account': 'no',
}
class ProjectTask(orm.Model):
"""
Add related ``Analytic Account`` and service ``Location``.
A Location can be any Contact Partner of the AA's Partner.
Other logic is possible, such as maintaining a specific list of service
addresses for each Contract, but that's out of scope here -
modules implementing these other possibilities are very welcome.
"""
_inherit = 'project.task'
_columns = {
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
domain="[('type','in',['normal','contract'])]"),
'location_id': fields.many2one(
'res.partner', 'Location',
domain="[('parent_id','child_of',partner_id)]"),
'use_analytic_account': fields.related(
'project_id', 'use_analytic_account',
type='char', string="Use Analytic Account"),
'project_code': fields.related(
'project_id', 'code', type='char', string="Project Code"),
}
def onchange_project(self, cr, uid, id, project_id, context=None):
# on_change is necessary to populate fields on Create, before saving
try:
# try applying a parent's onchange, may it exist
res = super(ProjectTask, self).onchange_project(
cr, uid, id, project_id, context=context) or {}
except AttributeError:
res = {}
if project_id:
obj = self.pool.get('project.project').browse(
cr, uid, project_id, context=context)
res.setdefault('value', {})
res['value']['use_analytic_account'] = (
obj.use_analytic_account or 'no')
return res
def onchange_analytic(self, cr, uid, id, analytic_id, context=None):
res = {}
model = self.pool.get('account.analytic.account')
obj = model.browse(cr, uid, analytic_id, context=context)
if obj:
# "contact_id" and "department_id" may be provided by other modules
fldmap = [ # analytic_account field -> task field
('partner_id', 'partner_id'),
('contact_id', 'location_id'),
('department_id', 'department_id')]
res['value'] = {dest: getattr(obj, orig).id
for orig, dest in fldmap
if hasattr(obj, orig) and getattr(obj, orig)}
return res
|
drnextgis/QGIS
|
refs/heads/master
|
python/ext-libs/yaml/emitter.py
|
388
|
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from error import YAMLError
from events import *
class EmitterError(YAMLError):
pass
class ScalarAnalysis(object):
def __init__(self, scalar, empty, multiline,
allow_flow_plain, allow_block_plain,
allow_single_quoted, allow_double_quoted,
allow_block):
self.scalar = scalar
self.empty = empty
self.multiline = multiline
self.allow_flow_plain = allow_flow_plain
self.allow_block_plain = allow_block_plain
self.allow_single_quoted = allow_single_quoted
self.allow_double_quoted = allow_double_quoted
self.allow_block = allow_block
class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!' : u'!',
u'tag:yaml.org,2002:' : u'!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
# The stream should have the methods `write` and possibly `flush`.
self.stream = stream
# Encoding can be overridden by STREAM-START.
self.encoding = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = []
self.state = self.expect_stream_start
# Current event and the event queue.
self.events = []
self.event = None
# The current indentation level and the stack of previous indents.
self.indents = []
self.indent = None
# Flow level.
self.flow_level = 0
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
# Whether the document requires an explicit document indicator
self.open_ended = False
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
self.best_indent = 2
if indent and 1 < indent < 10:
self.best_indent = indent
self.best_width = 80
if width and width > self.best_indent*2:
self.best_width = width
self.best_line_break = u'\n'
if line_break in [u'\r', u'\n', u'\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None
# Prepared anchor and tag.
self.prepared_anchor = None
self.prepared_tag = None
# Scalar analysis and style.
self.analysis = None
self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not getattr(self.stream, 'encoding', None):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s"
% self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator(u'...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = self.event.tags.keys()
handles.sort()
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (first and not self.event.explicit and not self.canonical
and not self.event.version and not self.event.tags
and not self.check_empty_document())
if not implicit:
self.write_indent()
self.write_indicator(u'---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator(u'...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError("expected DocumentStartEvent, but got %s"
% self.event)
def expect_document_end(self):
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator(u'...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError("expected DocumentEndEvent, but got %s"
% self.event)
def expect_document_root(self):
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
simple_key=False):
self.root_context = root
self.sequence_context = sequence
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
self.process_anchor(u'&')
self.process_tag()
if isinstance(self.event, ScalarEvent):
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_sequence():
self.expect_flow_sequence()
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_mapping():
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
if self.event.anchor is None:
raise EmitterError("anchor is not specified for alias")
self.process_anchor(u'*')
self.state = self.states.pop()
def expect_scalar(self):
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
self.write_indicator(u'[', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(u']', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(u',', False)
self.write_indent()
self.write_indicator(u']', False)
self.state = self.states.pop()
else:
self.write_indicator(u',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
self.write_indicator(u'{', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(u'}', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(u',', False)
self.write_indent()
self.write_indicator(u'}', False)
self.state = self.states.pop()
else:
self.write_indicator(u',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
self.write_indicator(u':', False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(u':', True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
indentless = (self.mapping_context and not self.indention)
self.increase_indent(flow=False, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
if not first and isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
self.write_indicator(u'-', True, indention=True)
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
self.increase_indent(flow=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
if not first and isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
if self.check_simple_key():
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
self.write_indicator(u':', False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
self.write_indent()
self.write_indicator(u':', True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
return (isinstance(self.event, SequenceStartEvent) and self.events
and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
return (isinstance(self.event, MappingStartEvent) and self.events
and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (isinstance(event, ScalarEvent) and event.anchor is None
and event.tag is None and event.implicit and event.value == u'')
def check_simple_key(self):
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
and self.event.tag is not None:
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return (length < 128 and (isinstance(self.event, AliasEvent)
or (isinstance(self.event, ScalarEvent)
and not self.analysis.empty and not self.analysis.multiline)
or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
if self.event.anchor is None:
self.prepared_anchor = None
return
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator+self.prepared_anchor, True)
self.prepared_anchor = None
def process_tag(self):
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if ((not self.canonical or tag is None) and
((self.style == '' and self.event.implicit[0])
or (self.style != '' and self.event.implicit[1]))):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = u'!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError("tag is not specified")
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
self.prepared_tag = None
def choose_scalar_style(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
if not self.event.style and self.event.implicit[0]:
if (not (self.simple_key_context and
(self.analysis.empty or self.analysis.multiline))
and (self.flow_level and self.analysis.allow_flow_plain
or (not self.flow_level and self.analysis.allow_block_plain))):
return ''
if self.event.style and self.event.style in '|>':
if (not self.flow_level and not self.simple_key_context
and self.analysis.allow_block):
return self.event.style
if not self.event.style or self.event.style == '\'':
if (self.analysis.allow_single_quoted and
not (self.simple_key_context and self.analysis.multiline)):
return '\''
return '"'
def process_scalar(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
split = (not self.simple_key_context)
#if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
elif self.style == '\'':
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
elif self.style == '|':
self.write_literal(self.analysis.scalar)
else:
self.write_plain(self.analysis.scalar, split)
self.analysis = None
self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return u'%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != u'!' or handle[-1] != u'!':
raise EmitterError("tag handle must start and end with '!': %r"
% (handle.encode('utf-8')))
for ch in handle[1:-1]:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch.encode('utf-8'), handle.encode('utf-8')))
return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == u'!':
end = 1
while end < len(prefix):
ch = prefix[end]
if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
return u''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == u'!':
return tag
handle = None
suffix = tag
prefixes = self.tag_prefixes.keys()
prefixes.sort()
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == u'!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?:@&=+$,_.~*\'()[]' \
or (ch == u'!' and handle != u'!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(suffix[start:end])
suffix_text = u''.join(chunks)
if handle:
return u'%s%s' % (handle, suffix_text)
else:
return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch.encode('utf-8'), anchor.encode('utf-8')))
return anchor
def analyze_scalar(self, scalar):
# Empty scalar is a special case.
if not scalar:
return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
allow_flow_plain=False, allow_block_plain=True,
allow_single_quoted=True, allow_double_quoted=True,
allow_block=False)
# Indicators and special characters.
block_indicators = False
flow_indicators = False
line_breaks = False
special_characters = False
# Important whitespace combinations.
leading_space = False
leading_break = False
trailing_space = False
trailing_break = False
break_space = False
space_break = False
# Check document indicators.
if scalar.startswith(u'---') or scalar.startswith(u'...'):
block_indicators = True
flow_indicators = True
# First character or preceded by a whitespace.
preceeded_by_whitespace = True
# Last character or followed by a whitespace.
followed_by_whitespace = (len(scalar) == 1 or
scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
# The previous character is a space.
previous_space = False
# The previous character is a break.
previous_break = False
index = 0
while index < len(scalar):
ch = scalar[index]
# Check for indicators.
if index == 0:
# Leading indicators are special characters.
if ch in u'#,[]{}&*!|>\'\"%@`':
flow_indicators = True
block_indicators = True
if ch in u'?:':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == u'-' and followed_by_whitespace:
flow_indicators = True
block_indicators = True
else:
# Some indicators cannot appear within a scalar as well.
if ch in u',?[]{}':
flow_indicators = True
if ch == u':':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == u'#' and preceeded_by_whitespace:
flow_indicators = True
block_indicators = True
# Check for line breaks, special, and unicode characters.
if ch in u'\n\x85\u2028\u2029':
line_breaks = True
if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
unicode_characters = True
if not self.allow_unicode:
special_characters = True
else:
special_characters = True
# Detect important whitespace combinations.
if ch == u' ':
if index == 0:
leading_space = True
if index == len(scalar)-1:
trailing_space = True
if previous_break:
break_space = True
previous_space = True
previous_break = False
elif ch in u'\n\x85\u2028\u2029':
if index == 0:
leading_break = True
if index == len(scalar)-1:
trailing_break = True
if previous_space:
space_break = True
previous_space = False
previous_break = True
else:
previous_space = False
previous_break = False
# Prepare for the next character.
index += 1
preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
followed_by_whitespace = (index+1 >= len(scalar) or
scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
# Let's decide what styles are allowed.
allow_flow_plain = True
allow_block_plain = True
allow_single_quoted = True
allow_double_quoted = True
allow_block = True
# Leading and trailing whitespaces are bad for plain scalars.
if (leading_space or leading_break
or trailing_space or trailing_break):
allow_flow_plain = allow_block_plain = False
# We do not permit trailing spaces for block scalars.
if trailing_space:
allow_block = False
# Spaces at the beginning of a new line are only acceptable for block
# scalars.
if break_space:
allow_flow_plain = allow_block_plain = allow_single_quoted = False
# Spaces followed by breaks, as well as special character are only
# allowed for double quoted scalars.
if space_break or special_characters:
allow_flow_plain = allow_block_plain = \
allow_single_quoted = allow_block = False
# Although the plain scalar writer supports breaks, we never emit
# multiline plain scalars.
if line_breaks:
allow_flow_plain = allow_block_plain = False
# Flow indicators are forbidden for flow plain scalars.
if flow_indicators:
allow_flow_plain = False
# Block indicators are forbidden for block plain scalars.
if block_indicators:
allow_block_plain = False
return ScalarAnalysis(scalar=scalar,
empty=False, multiline=line_breaks,
allow_flow_plain=allow_flow_plain,
allow_block_plain=allow_block_plain,
allow_single_quoted=allow_single_quoted,
allow_double_quoted=allow_double_quoted,
allow_block=allow_block)
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
self.stream.write(u'\uFEFF'.encode(self.encoding))
def write_stream_end(self):
self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = u' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_indent(self):
indent = self.indent or 0
if not self.indention or self.column > indent \
or (self.column == indent and not self.whitespace):
self.write_line_break()
if self.column < indent:
self.whitespace = True
data = u' '*(indent-self.column)
self.column = indent
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = u'%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = u'%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
self.write_indicator(u'\'', True)
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch is None or ch != u' ':
if start+1 == end and self.column > self.best_width and split \
and start != 0 and end != len(text):
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
if text[start] == u'\n':
self.write_line_break()
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch == u'\'':
data = u'\'\''
self.column += 2
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
if ch is not None:
spaces = (ch == u' ')
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
self.write_indicator(u'\'', False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
u'\x07': u'a',
u'\x08': u'b',
u'\x09': u't',
u'\x0A': u'n',
u'\x0B': u'v',
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
u'\"': u'\"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
u'\u2028': u'L',
u'\u2029': u'P',
}
def write_double_quoted(self, text, split=True):
self.write_indicator(u'"', True)
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
or not (u'\x20' <= ch <= u'\x7E'
or (self.allow_unicode
and (u'\xA0' <= ch <= u'\uD7FF'
or u'\uE000' <= ch <= u'\uFFFD'))):
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
if ch in self.ESCAPE_REPLACEMENTS:
data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
elif ch <= u'\xFF':
data = u'\\x%02X' % ord(ch)
elif ch <= u'\uFFFF':
data = u'\\u%04X' % ord(ch)
else:
data = u'\\U%08X' % ord(ch)
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end+1
if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
and self.column+(end-start) > self.best_width and split:
data = text[start:end]+u'\\'
if start < end:
start = end
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_indent()
self.whitespace = False
self.indention = False
if text[start] == u' ':
data = u'\\'
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
end += 1
self.write_indicator(u'"', False)
def determine_block_hints(self, text):
hints = u''
if text:
if text[0] in u' \n\x85\u2028\u2029':
hints += unicode(self.best_indent)
if text[-1] not in u'\n\x85\u2028\u2029':
hints += u'-'
elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
hints += u'+'
return hints
def write_folded(self, text):
hints = self.determine_block_hints(text)
self.write_indicator(u'>'+hints, True)
if hints[-1:] == u'+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
if not leading_space and ch is not None and ch != u' ' \
and text[start] == u'\n':
self.write_line_break()
leading_space = (ch == u' ')
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != u' ':
if start+1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in u'\n\x85\u2028\u2029')
spaces = (ch == u' ')
end += 1
def write_literal(self, text):
hints = self.determine_block_hints(text)
self.write_indicator(u'|'+hints, True)
if hints[-1:] == u'+':
self.open_ended = True
self.write_line_break()
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
else:
if ch is None or ch in u'\n\x85\u2028\u2029':
data = text[start:end]
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
def write_plain(self, text, split=True):
if self.root_context:
self.open_ended = True
if not text:
return
if not self.whitespace:
data = u' '
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.whitespace = False
self.indention = False
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch != u' ':
if start+1 == end and self.column > self.best_width and split:
self.write_indent()
self.whitespace = False
self.indention = False
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch not in u'\n\x85\u2028\u2029':
if text[start] == u'\n':
self.write_line_break()
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
self.whitespace = False
self.indention = False
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
spaces = (ch == u' ')
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
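
# Hedged usage sketch (not part of the original module): drive the Emitter
# directly with the event grammar documented at the top of this file:
# STREAM-START DOCUMENT-START node DOCUMENT-END STREAM-END.
if __name__ == '__main__':
    from StringIO import StringIO
    out = StringIO()
    emitter = Emitter(out)
    for ev in [StreamStartEvent(),
               DocumentStartEvent(explicit=False),
               ScalarEvent(anchor=None, tag=None, implicit=(True, True),
                           value=u'hello world'),
               DocumentEndEvent(explicit=False),
               StreamEndEvent()]:
        emitter.emit(ev)
    print(out.getvalue())  # like yaml.dump(u'hello world'): "hello world\n...\n"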
|
codrut3/tensorflow
|
refs/heads/master
|
tensorflow/contrib/eager/python/metrics.py
|
30
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=wildcard-import
from tensorflow.contrib.eager.python.metrics_impl import *
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['Accuracy', 'Mean', 'Metric']
remove_undocumented(__name__, _allowed_symbols)
|
jbzdak/data-base-checker
|
refs/heads/master
|
bdcheckerapp/registration/backend/__init__.py
|
1349
|
# -*- coding: utf-8 -*-
|
zacandcheese/Keyboard-Biometric-Project
|
refs/heads/master
|
Project_Tuples/oldMain.py
|
1
|
#cmd /K "$(FULL_CURRENT_PATH)"
"""
Author: Zachary Nowak
Date:11/3/2017
Program Description: This code can record the
Press Time and Flight Time of a tuple as a user
types a passage and it saves a matrix to a file.
"""
#TO-DO
"""
PRESS TIME
STORING DATA
MAKING A SIGNATURE
"""
"""PYTHON/SITE PACKAGES"""
import win32api
import os
import time
import numpy as np
"""FOLDER FILES"""
import listOfAllKeys
import determineChar
#import passageMaker
"""FOLDER IMPORTS"""
#passage = passageMaker.create(something)
#passage = "The quick brown fox jumps over the lazy dog talking back"
passage = "The trophy other with both graph phone phat three philly hath that weather pho "
#tupleList = passageMaker.list()
#NOTE TUPLES MUST BE SAME SIZE
tupleList = ["th","ph"]
stateDict = listOfAllKeys.stateDict
nameDict = listOfAllKeys.nameDict
"""LOCAL VARIABLES"""
timingList = [[] for i in range(len(tupleList))]#FOR TIME IT TAKES TO WRITE A WORD
pressList = [[0] for i in range(len(tupleList))]#FOR PRESS TIME [TIME, AVGPRESSTIME, COUNTER] EACH FIRST LETTER
tuplePresent = False
tupleCounter = 0
tupleTime = 0
passageTyped = ""
end = True
enterCounter = 1 #CHANGE TO 0 WHEN INCLUDING A NAMING FEATURE
print(passage)
while end:
for i in range(0,256):
try:
if(win32api.GetAsyncKeyState(i) == stateDict[nameDict[i]]):
"""DETERMINE WHAT CHAR IT IS"""
char = nameDict[i]
char2 = determineChar.determineChar(i,stateDict,nameDict)
"""ADD TO THE STRING"""
if char2 == "SHIFT" or char2 == "DELETE": #DO NOT ADD TO STRING
if stateDict[char] == 0:
stateDict[char] = -32768 #CHANGE STATES
else:
stateDict[char] = 0 #CHANGES STATE
os.system('cls')#CLEARS THE COMMAND PROMPT
if char == "DELETE":#DELETE
passageTyped = passageTyped[:-1]
print(passage)
print(passageTyped)
"""RELEASED"""
elif stateDict[char] == 0:
stateDict[char] = -32768 #CHANGE STATES
"""PRESSED"""
else:
passageTyped += char2
os.system('cls')#CLEARS THE COMMAND PROMPT
if (char == '\n'):
if(enterCounter>0):#IF ENTER IS PRESSED BREAK THE CODE
end = False
else:
enterCounter+=1
print(passage)
print(passageTyped)
stateDict[char] = 0 #CHANGES STATE
"""TUPLE STUFF"""
if tuplePresent:
"""DETERMINE IF IT IS STILL GOOD"""
if(passageTyped[-1] == tuple[tupleCounter]):
"""DETERMINE IF IT IS DONE"""
if tupleCounter == len(tupleList[0])-1:
word = passageTyped[len(passageTyped)-(tupleCounter+1):]
wordIndex = tupleList.index(word)
timingList[wordIndex].append(time.time()-tupleTime)
tuplePresent = False
tupleCounter = 0
else:
tupleCounter += 1
else:
tuplePresent = False
tupleCounter = 0
"""DETERMINE IF IT IS A START OF A TUPLE"""
if not tuplePresent:
for tuple in tupleList:
if passageTyped[-1] == tuple[0]:
tuplePresent = True
tupleCounter += 1
tupleTime = time.time()
break
except KeyError:
pass
print("THIS IS THE TUPLE LIST", tupleList)
print("THIS IS THE TIMINGLIST", timingList)
getMessage = input()#PREVENT ERRORS
|
sergiocorato/odoomrp-wip
|
refs/heads/8.0
|
stock_quant_name_search/models/__init__.py
|
18
|
# -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import stock_quant
|
jhoos/django
|
refs/heads/master
|
tests/migrations/test_migrations_no_changes/0002_second.py
|
439
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ("migrations", "0001_initial"),
    ]

    operations = [
        migrations.DeleteModel("Tribble"),
        migrations.RemoveField("Author", "silly_field"),
        migrations.AddField("Author", "rating", models.IntegerField(default=0)),
        migrations.CreateModel(
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("author", models.ForeignKey("migrations.Author", null=True)),
            ],
        )
    ]
|
simudream/PyDev.Debugger
|
refs/heads/development
|
pydev_imports.py
|
53
|
from pydevd_constants import USE_LIB_COPY, izip
try:
try:
if USE_LIB_COPY:
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
else:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
except ImportError:
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
try:
try:
if USE_LIB_COPY:
from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
else:
from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
from xmlrpc.server import SimpleXMLRPCServer
except ImportError:
from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
execfile=execfile #Not in Py3k
except NameError:
from _pydev_imps._pydev_execfile import execfile
try:
if USE_LIB_COPY:
from _pydev_imps import _pydev_Queue as _queue
else:
import Queue as _queue
except:
import queue as _queue #@UnresolvedImport
try:
from pydevd_exec import Exec
except:
from pydevd_exec2 import Exec
try:
from urllib import quote, quote_plus, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote_plus #@UnresolvedImport
import os
try:
relpath = os.path.relpath
except:
# Only there from 2.6 onwards... let's provide a replacement.
def _split_path(path):
parts = []
loc = path
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = os.path.split(prev)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(path, start=None):
if start is None:
start = os.curdir
origin = os.path.abspath(path)
start = os.path.abspath(start)
orig_list = _split_path(os.path.normcase(origin))
dest_list = _split_path(start)
if orig_list[0] != os.path.normcase(dest_list[0]):
return start
i = 0
for start_seg, dest_seg in izip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
segments = [os.pardir] * (len(orig_list) - i)
segments += dest_list[i:]
if len(segments) == 0:
return os.curdir
else:
return os.path.join(*segments)
|
evz/illuminator
|
refs/heads/master
|
tif/wsgi.py
|
1
|
"""
WSGI config for tif project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "tif.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tif.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
devs1991/test_edx_docmode
|
refs/heads/master
|
venv/lib/python2.7/site-packages/Crypto/SelfTest/Protocol/__init__.py
|
116
|
# -*- coding: utf-8 -*-
#
# SelfTest/Protocol/__init__.py: Self-tests for Crypto.Protocol
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for Crypto.Protocol"""
__revision__ = "$Id$"
def get_tests(config={}):
    tests = []
    from Crypto.SelfTest.Protocol import test_chaffing; tests += test_chaffing.get_tests(config=config)
    from Crypto.SelfTest.Protocol import test_rfc1751; tests += test_rfc1751.get_tests(config=config)
    from Crypto.SelfTest.Protocol import test_AllOrNothing; tests += test_AllOrNothing.get_tests(config=config)
    return tests

if __name__ == '__main__':
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
Matir/pwnableweb
|
refs/heads/master
|
pwnableapp/client.py
|
1
|
# Copyright 2014 David Tomaschik <david@systemoverlord.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import daemonize
import os
import signal
import sys
import threading
import time
import xvfbwrapper
from selenium import webdriver
from selenium.common import exceptions
class VulnerableClient(object):
"""Run a Chrome client to simulate XSS, XSRF, etc.
Runs Chrome inside XVFB. Only need to override run().
"""
def __init__(self, name, chromedriver_path='chromedriver'):
# Setup objects
self._run_event = threading.Event()
self._stop_event = threading.Event()
self._thread = None
self._chromedriver_path = chromedriver_path
self._daemon = None
self._name = name
self._started = False
# Parse config
self._config = self._parse_config()
if self._config.daemon:
user = self._config.user if os.geteuid() == 0 else None
group = self._config.group if os.geteuid() == 0 else None
self._daemon = daemonize.Daemonize(
name,
pid=self._config.pidfile,
action=self._start_internal,
user=user,
group=group)
def _parse_config(self):
parser = argparse.ArgumentParser(description='Vulnerable Client')
parser.add_argument('--user', help='Drop privileges to this user.',
default='nobody')
parser.add_argument('--group', help='Drop privileges to this group.',
default='nogroup')
parser.add_argument('--nodaemon', help='Daemonize.', action='store_false',
dest='daemon')
parser.add_argument('--pidfile', help='Write pid to file.',
default='/tmp/%s.pid' % self._name)
return parser.parse_args()
def __del__(self):
# Attempt to shutdown xvfb and browser
self.stop()
def stop(self, *unused_args):
if not self._started:
return
try:
self._stop_event.set()
if self._thread:
self._thread.join()
self.browser.quit()
self.browser = None
self.xvfb.stop()
self.xvfb = None
except AttributeError:
pass
sys.exit(0)
def start(self, check_interval=60):
"""Manage running the run() function.
Calls run at check_interval seconds, unless run returns True, which
reschedules it immediately.
"""
self._interval = check_interval
if self._daemon:
self._daemon.start()
else:
self._start_internal()
def _start_internal(self):
self._started = True
# Signals
for sig in (signal.SIGINT, signal.SIGQUIT, signal.SIGTERM):
signal.signal(sig, self.stop)
# Setup the browser & xvfb
self.xvfb = xvfbwrapper.Xvfb(width=1024, height=768)
self.xvfb.start()
self.browser = webdriver.Chrome(executable_path=self._chromedriver_path)
self._run_event.set()
self._stop_event.clear()
self._thread = threading.Thread(target=self._wrap_run)
self._thread.start()
try:
while True:
time.sleep(self._interval)
self._run_event.set()
except KeyboardInterrupt:
print 'Saw CTRL-C, shutting down.'
self.stop()
def _wrap_run(self):
"""Run in a separate thread."""
while not self._stop_event.is_set():
if not self._run_event.wait(5):
continue
self._run_event.clear()
try:
while self.run():
if self._stop_event.is_set():
break
except exceptions.WebDriverException:
continue
def run(self):
raise NotImplementedError("Must be implemented by subclass.")
@property
def chain(self):
return webdriver.ActionChains(self.browser)
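
# Hedged usage sketch (not part of the original module): as the class
# docstring says, only run() needs to be overridden. The subclass name and
# target URL below are hypothetical.
class ExampleClient(VulnerableClient):
    def run(self):
        self.browser.get('http://localhost:8000/')  # hypothetical target
        return False  # False => wait check_interval seconds before re-running

if __name__ == '__main__':
    ExampleClient('example-client', chromedriver_path='chromedriver').start(
        check_interval=60)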
|
nwchandler/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aos/__init__.py
|
12133432
| |
PyCon/pycon
|
refs/heads/master
|
symposion/proposals/management/commands/__init__.py
|
12133432
| |
alexandrucoman/vbox-nova-driver
|
refs/heads/master
|
nova/conductor/tasks/__init__.py
|
12133432
| |
mwiebe/numpy
|
refs/heads/master
|
numpy/lib/format.py
|
33
|
"""
Define a simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
persisting a *single* arbitrary NumPy array on disk. The format stores all
of the shape and dtype information necessary to reconstruct the array
correctly even on another machine with a different architecture.
The format is designed to be as simple as possible while achieving
its limited goals.
The ``.npz`` format is the standard format for persisting *multiple* NumPy
arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
files, one for each array.
Capabilities
------------
- Can represent all NumPy arrays including nested record arrays and
object arrays.
- Represents the data in its native binary form.
- Supports Fortran-contiguous arrays directly.
- Stores all of the necessary information to reconstruct the array
including shape and dtype on a machine of a different
architecture. Both little-endian and big-endian arrays are
supported, and a file with little-endian numbers will yield
a little-endian array on any machine reading the file. The
types are described in terms of their actual sizes. For example,
if a machine with a 64-bit C "long int" writes out an array with
"long ints", a reading machine with 32-bit C "long ints" will yield
an array with 64-bit integers.
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
able to create a solution in their preferred programming language to
read most ``.npy`` files that they have been given without much
documentation.
- Allows memory-mapping of the data. See `open_memmap`.
- Can be read from a filelike stream object instead of an actual file.
- Stores object arrays, i.e. arrays containing elements that are arbitrary
Python objects. Files with object arrays are not to be mmapable, but
can be read and written to disk.
Limitations
-----------
- Arbitrary subclasses of numpy.ndarray are not completely preserved.
Subclasses will be accepted for writing, but only the array data will
be written out. A regular numpy.ndarray object will be created
upon reading the file.
.. warning::
Due to limitations in the interpretation of structured dtypes, dtypes
with fields with empty names will have the names replaced by 'f0', 'f1',
etc. Such arrays will not round-trip through the format entirely
accurately. The data is intact; only the field names will differ. We are
working on a fix for this. This fix will not require a change in the
file format. The arrays with such structures can still be saved and
restored, and the correct dtype may be restored by using the
``loadedarray.view(correct_dtype)`` method.
File extensions
---------------
We recommend using the ``.npy`` and ``.npz`` extensions for files saved
in this format. This is by no means a requirement; applications may wish
to use these file formats but use an extension specific to the
application. In the absence of an obvious alternative, however,
we suggest using ``.npy`` and ``.npz``.
Version numbering
-----------------
The version numbering of these formats is independent of NumPy version
numbering. If the format is upgraded, the code in `numpy.io` will still
be able to read and write Version 1.0 files.
Format Version 1.0
------------------
The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
format, e.g. ``\\x00``. Note: the version of the file format is not tied
to the version of the numpy package.
The next 2 bytes form a little-endian unsigned short int: the length of
the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
spaces (``\\x20``) to make the total length of
``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment
purposes.
The dictionary contains three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the `numpy.dtype`
constructor to create the array's dtype.
"fortran_order" : bool
Whether the array data is Fortran-contiguous or not. Since
Fortran-contiguous arrays are a common form of non-C-contiguity,
we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
The shape of the array.
For repeatability and readability, the dictionary keys are sorted in
alphabetic order. This is for convenience only. A writer SHOULD implement
this if possible. A reader MUST NOT depend on this.
Following the header comes the array data. If the dtype contains Python
objects (i.e. ``dtype.hasobject is True``), then the data is a Python
pickle of the array. Otherwise the data is the contiguous (either C-
or Fortran-, depending on ``fortran_order``) bytes of the array.
Consumers can figure out the number of bytes by multiplying the number
of elements given by the shape (noting that ``shape=()`` means there is
1 element) by ``dtype.itemsize``.
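As an illustration only (the routines in this module implement this with
proper error checking), a version 1.0 file could be parsed by hand roughly
as follows, where ``'data.npy'`` is a placeholder filename::

    import ast
    import struct

    with open('data.npy', 'rb') as fp:
        magic = fp.read(6)                              # the magic string
        major, minor = struct.unpack('BB', fp.read(2))  # format version
        hlen, = struct.unpack('<H', fp.read(2))         # HEADER_LEN
        header = ast.literal_eval(fp.read(hlen).decode('latin1'))
    # header now holds the 'descr', 'fortran_order' and 'shape' entries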
Format Version 2.0
------------------
The version 1.0 format only allowed the array header to have a total size of
65535 bytes. This can be exceeded by structured arrays with a large number of
columns. The version 2.0 format extends the header size to 4 GiB.
`numpy.save` will automatically save in 2.0 format if the data requires it,
else it will always use the more compatible 1.0 format.
The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
Notes
-----
The ``.npy`` format, including reasons for creating it and a comparison of
alternatives, is described fully in the "npy-format" NEP.
"""
from __future__ import division, absolute_import, print_function
import numpy
import sys
import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import asbytes, asstr, isfileobj, long, basestring
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
MAGIC_PREFIX = asbytes('\x93NUMPY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
def _check_version(version):
if version not in [(1, 0), (2, 0), None]:
msg = "we only support format version (1,0) and (2, 0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
""" Return the magic string for the given file format version.
Parameters
----------
major : int in [0, 255]
minor : int in [0, 255]
Returns
-------
magic : str
Raises
------
ValueError if the version cannot be formatted.
"""
if major < 0 or major > 255:
raise ValueError("major version must be 0 <= major < 256")
if minor < 0 or minor > 255:
raise ValueError("minor version must be 0 <= minor < 256")
if sys.version_info[0] < 3:
return MAGIC_PREFIX + chr(major) + chr(minor)
else:
return MAGIC_PREFIX + bytes([major, minor])
def read_magic(fp):
""" Read the magic string to get the version of the file format.
Parameters
----------
fp : filelike object
Returns
-------
major : int
minor : int
"""
magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
if magic_str[:-2] != MAGIC_PREFIX:
msg = "the magic string is not correct; expected %r, got %r"
raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
if sys.version_info[0] < 3:
major, minor = map(ord, magic_str[-2:])
else:
major, minor = magic_str[-2:]
return major, minor
def dtype_to_descr(dtype):
"""
Get a serializable descriptor from the dtype.
The .descr attribute of a dtype object cannot be round-tripped through
the dtype() constructor. Simple types, like dtype('float32'), have
a descr which looks like a record array with one field with '' as
a name (for example ``[('', '<f4')]`` on a little-endian machine). The
dtype() constructor interprets this as a request to give a default name.
Instead, we construct a descriptor that can be passed to dtype().
Parameters
----------
dtype : dtype
The dtype of the array that will be written to disk.
Returns
-------
descr : object
An object that can be passed to `numpy.dtype()` in order to
replicate the input dtype.
"""
if dtype.names is not None:
# This is a record array. The .descr is fine. XXX: parts of the
# record array with an empty name, like padding bytes, still get
# fiddled with. This needs to be fixed in the C implementation of
# dtype().
return dtype.descr
else:
return dtype.str
def header_data_from_array_1_0(array):
""" Get the dictionary of header metadata from a numpy.ndarray.
Parameters
----------
array : numpy.ndarray
Returns
-------
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
"""
d = {'shape': array.shape}
if array.flags.c_contiguous:
d['fortran_order'] = False
elif array.flags.f_contiguous:
d['fortran_order'] = True
else:
# Totally non-contiguous data. We will have to make it C-contiguous
# before writing. Note that we need to test for C_CONTIGUOUS first
# because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
d['fortran_order'] = False
d['descr'] = dtype_to_descr(array.dtype)
return d
def _write_array_header(fp, d, version=None):
""" Write the header for an array and returns the version used
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
version: tuple or None
None means use the oldest version that works; an explicit version will
raise a ValueError if the format does not allow saving this data.
Default: None
Returns
-------
version : tuple of int
the file version which needs to be used to store the data
"""
import struct
header = ["{"]
for key, value in sorted(d.items()):
# Need to use repr here, since we eval these when reading
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
# Pad the header with spaces and a final newline such that the magic
# string, the header-length short and the header are aligned on a
# 16-byte boundary. Hopefully, some system, possibly memory-mapping,
# can take advantage of our premature optimization.
current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
topad = 16 - (current_header_len % 16)
header = header + ' '*topad + '\n'
header = asbytes(_filter_header(header))
hlen = len(header)
if hlen < 256*256 and version in (None, (1, 0)):
version = (1, 0)
header_prefix = magic(1, 0) + struct.pack('<H', hlen)
elif hlen < 2**32 and version in (None, (2, 0)):
version = (2, 0)
header_prefix = magic(2, 0) + struct.pack('<I', hlen)
else:
msg = "Header length %s too big for version=%s"
msg %= (hlen, version)
raise ValueError(msg)
fp.write(header_prefix)
fp.write(header)
return version
def write_array_header_1_0(fp, d):
""" Write the header for an array using the 1.0 format.
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (1, 0))
def write_array_header_2_0(fp, d):
""" Write the header for an array using the 2.0 format.
The 2.0 format allows storing very large structured arrays.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (2, 0))
def read_array_header_1_0(fp):
"""
Read an array header from a filelike object using the 1.0 file format
version.
This will leave the file object located just after the header.
Parameters
----------
fp : filelike object
A file object or something with a `.read()` method like a file.
Returns
-------
shape : tuple of int
The shape of the array.
fortran_order : bool
The array data will be written out directly if it is either
C-contiguous or Fortran-contiguous. Otherwise, it will be made
contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
Raises
------
ValueError
If the data is invalid.
"""
return _read_array_header(fp, version=(1, 0))
def read_array_header_2_0(fp):
"""
Read an array header from a filelike object using the 2.0 file format
version.
This will leave the file object located just after the header.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
A file object or something with a `.read()` method like a file.
Returns
-------
shape : tuple of int
The shape of the array.
fortran_order : bool
The array data will be written out directly if it is either
C-contiguous or Fortran-contiguous. Otherwise, it will be made
contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
Raises
------
ValueError
If the data is invalid.
"""
return _read_array_header(fp, version=(2, 0))
def _filter_header(s):
"""Clean up 'L' in npz header ints.
Cleans up the 'L' in strings representing integers. Needed to allow npz
headers produced in Python2 to be read in Python3.
Parameters
----------
s : byte string
Npy file header.
Returns
-------
header : str
Cleaned up header.
"""
import tokenize
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
tokens = []
last_token_was_number = False
for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
token_type = token[0]
token_string = token[1]
if (last_token_was_number and
token_type == tokenize.NAME and
token_string == "L"):
continue
else:
tokens.append(token)
last_token_was_number = (token_type == tokenize.NUMBER)
return tokenize.untokenize(tokens)
def _read_array_header(fp, version):
"""
see read_array_header_1_0
"""
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
if version == (1, 0):
hlength_str = _read_bytes(fp, 2, "array header length")
header_length = struct.unpack('<H', hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
elif version == (2, 0):
hlength_str = _read_bytes(fp, 4, "array header length")
header_length = struct.unpack('<I', hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
else:
raise ValueError("Invalid version %r" % version)
# The header is a pretty-printed string representation of a literal
# Python dictionary with trailing newlines padded to a 16-byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
header = _filter_header(header)
try:
d = safe_eval(header)
except SyntaxError as e:
msg = "Cannot parse header: %r\nException: %r"
raise ValueError(msg % (header, e))
if not isinstance(d, dict):
msg = "Header is not a dictionary: %r"
raise ValueError(msg % d)
keys = sorted(d.keys())
if keys != ['descr', 'fortran_order', 'shape']:
msg = "Header does not contain the correct keys: %r"
raise ValueError(msg % (keys,))
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
msg = "shape is not valid: %r"
raise ValueError(msg % (d['shape'],))
if not isinstance(d['fortran_order'], bool):
msg = "fortran_order is not a valid bool: %r"
raise ValueError(msg % (d['fortran_order'],))
try:
dtype = numpy.dtype(d['descr'])
except TypeError as e:
msg = "descr is not a valid dtype descriptor: %r"
raise ValueError(msg % (d['descr'],))
return d['shape'], d['fortran_order'], dtype
def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
"""
Write an array to an NPY file, including a header.
If the array is neither C-contiguous nor Fortran-contiguous AND the
file_like object is not a real file object, this function will have to
copy data in memory.
Parameters
----------
fp : file_like object
An open, writable file object, or similar object with a
``.write()`` method.
array : ndarray
The array to write to disk.
version : (int, int) or None, optional
The version number of the format. None means use the oldest
supported version that is able to store the data. Default: None
allow_pickle : bool, optional
Whether to allow writing pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass to pickle.dump, excluding
'protocol'. These are only useful when pickling objects in object
arrays on Python 3 to Python 2 compatible format.
Raises
------
ValueError
If the array cannot be persisted. This includes the case of
allow_pickle=False and array being an object array.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
are not picklable.
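Examples
--------
A minimal round-trip sketch using an in-memory buffer::

    import io
    buf = io.BytesIO()
    write_array(buf, numpy.arange(6).reshape(2, 3))
    buf.seek(0)
    arr = read_array(buf)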
"""
_check_version(version)
used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
version)
# this warning can be removed when 1.9 has aged enough
if version != (2, 0) and used_ver == (2, 0):
warnings.warn("Stored array in format 2.0. It can only be"
"read by NumPy >= 1.9", UserWarning)
# Set buffer size to 16 MiB to hide the Python loop overhead.
buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data
# directly. Instead, we will pickle it out with version 2 of the
# pickle protocol.
if not allow_pickle:
raise ValueError("Object arrays cannot be saved when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
pickle.dump(array, fp, protocol=2, **pickle_kwargs)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='F'):
fp.write(chunk.tobytes('C'))
else:
if isfileobj(fp):
array.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='C'):
fp.write(chunk.tobytes('C'))
def read_array(fp, allow_pickle=True, pickle_kwargs=None):
"""
Read an array from an NPY file.
Parameters
----------
fp : file_like object
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
Whether to allow reading pickled data. Default: True
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2 when using
Python 3.
Returns
-------
array : ndarray
The array from the data on disk.
Raises
------
ValueError
If the data is invalid, or allow_pickle=False and the file contains
an object array.
"""
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(fp, version)
if len(shape) == 0:
count = 1
else:
count = numpy.multiply.reduce(shape)
# Now read the actual data.
if dtype.hasobject:
# The array contained Python objects. We need to unpickle the data.
if not allow_pickle:
raise ValueError("Object arrays cannot be loaded when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
try:
array = pickle.load(fp, **pickle_kwargs)
except UnicodeError as err:
if sys.version_info[0] >= 3:
# Friendlier error message
raise UnicodeError("Unpickling a python object failed: %r\n"
"You may need to pass the encoding= option "
"to numpy.load" % (err,))
raise
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
array = numpy.fromfile(fp, dtype=dtype, count=count)
else:
# This is not a real file. We have to read it the
# memory-intensive way.
# crc32 module fails on reads greater than 2 ** 32 bytes,
# breaking large reads from gzip streams. Chunk reads to
# BUFFER_SIZE bytes to avoid issue and reduce memory overhead
# of the read. In non-chunked case count < max_read_count, so
# only one read is performed.
max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
array = numpy.empty(count, dtype=dtype)
for i in range(0, count, max_read_count):
read_count = min(max_read_count, count - i)
read_size = int(read_count * dtype.itemsize)
data = _read_bytes(fp, read_size, "array data")
array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
count=read_count)
if fortran_order:
array.shape = shape[::-1]
array = array.transpose()
else:
array.shape = shape
return array
def open_memmap(filename, mode='r+', dtype=None, shape=None,
fortran_order=False, version=None):
"""
Open a .npy file as a memory-mapped array.
This may be used to read an existing file or create a new one.
Parameters
----------
filename : str
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
addition to the standard file modes, 'c' is also accepted to mean
"copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
mode; otherwise, `dtype` is ignored. The default value is None, which
results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
C-contiguous (False, the default) if we are creating a new file in
"write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
format used to create the file. None means use the oldest
supported version that is able to store the data. Default: None
Returns
-------
marray : memmap
The memory-mapped array.
Raises
------
ValueError
If the data or the mode is invalid.
IOError
If the file is not found or cannot be opened correctly.
See Also
--------
memmap
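Examples
--------
Create a small writable memmap (a sketch only; ``'scratch.npy'`` is a
placeholder filename)::

    a = open_memmap('scratch.npy', mode='w+', dtype='float64', shape=(3, 4))
    a[:] = 0.0
    a.flush()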
"""
if not isinstance(filename, basestring):
raise ValueError("Filename must be a string. Memmap cannot use"
" existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
# Check if we ought to create the file.
_check_version(version)
# Ensure that the given dtype is an authentic dtype object rather
# than just something that can be interpreted as a dtype object.
dtype = numpy.dtype(dtype)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
d = dict(
descr=dtype_to_descr(dtype),
fortran_order=fortran_order,
shape=shape,
)
# If we got here, then it should be safe to create the file.
fp = open(filename, mode+'b')
try:
used_ver = _write_array_header(fp, d, version)
# this warning can be removed when 1.9 has aged enough
if version != (2, 0) and used_ver == (2, 0):
warnings.warn("Stored array in format 2.0. It can only be"
"read by NumPy >= 1.9", UserWarning)
offset = fp.tell()
finally:
fp.close()
else:
# Read the header of the file first.
fp = open(filename, 'rb')
try:
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(fp, version)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
offset = fp.tell()
finally:
fp.close()
if fortran_order:
order = 'F'
else:
order = 'C'
# We need to change a write-only mode to a read-write mode since we've
# already written data to the file.
if mode == 'w+':
mode = 'r+'
marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
mode=mode, offset=offset)
return marray
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Read from file-like object until size bytes are read.
Raises ValueError if EOF is encountered before ``size`` bytes have been read.
Non-blocking objects are only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
|
boompieman/iim_project
|
refs/heads/master
|
project_python2/lib/python2.7/site-packages/pip/__init__.py
|
57
|
#!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "8.1.1"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout=5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip.utils.encoding.auto_decode
locale.setlocale(locale.LC_ALL, '')
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
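# Illustrative programmatic use of main() (a sketch only; the package name
# below is a placeholder, not a recommendation):
#
#     import pip
#     pip.main(['install', '--user', 'SomePackage'])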
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
sys.exit(main())
|
denny820909/builder
|
refs/heads/master
|
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/status/web/baseweb.py
|
4
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os, weakref
from zope.interface import implements
from twisted.python import log
from twisted.application import strports, service
from twisted.internet import defer
from twisted.web import server, distrib, static
from twisted.spread import pb
from twisted.web.util import Redirect
from buildbot import config
from buildbot.interfaces import IStatusReceiver
from buildbot.status.web.base import StaticFile, createJinjaEnv
from buildbot.status.web.feeds import Rss20StatusResource, \
Atom10StatusResource
from buildbot.status.web.waterfall import WaterfallStatusResource
from buildbot.status.web.console import ConsoleStatusResource
from buildbot.status.web.olpb import OneLinePerBuild
from buildbot.status.web.grid import GridStatusResource
from buildbot.status.web.grid import TransposedGridStatusResource
from buildbot.status.web.changes import ChangesResource
from buildbot.status.web.builder import BuildersResource
from buildbot.status.web.buildstatus import BuildStatusStatusResource
from buildbot.status.web.slaves import BuildSlavesResource
from buildbot.status.web.status_json import JsonStatusResource
from buildbot.status.web.about import AboutBuildbot
from buildbot.status.web.authz import Authz
from buildbot.status.web.auth import AuthFailResource,AuthzFailResource, LoginResource, LogoutResource
from buildbot.status.web.root import RootPage
from buildbot.status.web.users import UsersResource
from buildbot.status.web.change_hook import ChangeHookResource
from twisted.cred.portal import IRealm, Portal
from twisted.cred import strcred
from twisted.cred.checkers import ICredentialsChecker
from twisted.cred.credentials import IUsernamePassword
from twisted.web import resource, guard
# this module contains the WebStatus class. Basic utilities are in base.py,
# and specific pages are each in their own module.
class WebStatus(service.MultiService):
implements(IStatusReceiver)
# TODO: IStatusReceiver is really about things which subscribe to hear
# about buildbot events. We need a different interface (perhaps a parent
# of IStatusReceiver) for status targets that don't subscribe, like the
# WebStatus class. buildbot.master.BuildMaster.loadConfig:737 asserts
# that everything in c['status'] provides IStatusReceiver, but really it
# should check that they provide IStatusTarget instead.
"""
The webserver provided by this class has the following resources:
/waterfall : the big time-oriented 'waterfall' display, with links
to individual changes, builders, builds, steps, and logs.
A number of query-arguments can be added to influence
the display.
/rss : an RSS feed summarizing all failed builds. The same
query-arguments used by 'waterfall' can be added to
influence the feed output.
/atom : an Atom feed summarizing all failed builds. The same
query-arguments used by 'waterfall' can be added to
influence the feed output.
/grid : another summary display that shows a grid of builds, with
sourcestamps on the x axis, and builders on the y. Query
arguments similar to those for the waterfall can be added.
/tgrid : similar to the grid display, but the commits are down the
left side, and the build hosts are across the top.
/builders/BUILDERNAME: a page summarizing the builder. This includes
references to the Schedulers that feed it,
any builds currently in the queue, which
buildslaves are designated or attached, and a
summary of the build process it uses.
/builders/BUILDERNAME/builds/NUM: a page describing a single Build
/builders/BUILDERNAME/builds/NUM/steps/STEPNAME: describes a single step
/builders/BUILDERNAME/builds/NUM/steps/STEPNAME/logs/LOGNAME: a StatusLog
/builders/_all/{force,stop}: force a build/stop building on all builders.
/buildstatus?builder=...&number=...: an embedded iframe for the console
/changes : summarize all ChangeSources
/changes/CHANGENUM: a page describing a single Change
/buildslaves : list all BuildSlaves
/buildslaves/SLAVENAME : describe a single BuildSlave
/one_line_per_build : summarize the last few builds, one line each
/one_line_per_build/BUILDERNAME : same, but only for a single builder
/about : describe this buildmaster (Buildbot and support library versions)
/change_hook[/DIALECT] : accepts changes from external sources, optionally
choosing the dialect that will be permitted
(i.e. github format, etc..)
and more! see the manual.
All URLs for pages which are not defined here are used to look
for files in PUBLIC_HTML, which defaults to BASEDIR/public_html.
This means that /robots.txt or /favicon.ico can be placed in
that directory
This webserver uses the jinja2 template system to generate the web pages
(see http://jinja.pocoo.org/2/) and by default loads pages from the
buildbot.status.web.templates package. Any file here can be overridden by placing
a corresponding file in the master's 'templates' directory.
The main customization points are layout.html which loads style sheet
(css) and provides header and footer content, and root.html, which
generates the root page.
All of the resources provided by this service use relative URLs to reach
each other. The only absolute links are the c['titleURL'] links at the
top and bottom of the page, and the buildbot home-page link at the
bottom.
Buildbot uses some generic classes to identify the type of object, and
some more specific classes for the various kinds of those types. It does
this by specifying both in the class attributes where applicable,
separated by a space. It is important that in your CSS you declare the
more generic class styles above the more specific ones. For example,
first define a style for .Event, and below that for .SUCCESS
The following CSS class names are used:
- Activity, Event, BuildStep, LastBuild: general classes
- waiting, interlocked, building, offline, idle: Activity states
- start, running, success, failure, warnings, skipped, exception:
LastBuild and BuildStep states
- Change: box with change
- Builder: box for builder name (at top)
- Project
- Time
"""
# we are not a ComparableMixin, and therefore the webserver will be
# rebuilt every time we reconfig. This is because WebStatus.putChild()
# makes it too difficult to tell whether two instances are the same or
# not (we'd have to do a recursive traversal of all children to discover
# all the changes).
def __init__(self, http_port=None, distrib_port=None, allowForce=None,
public_html="public_html", site=None, numbuilds=20,
num_events=200, num_events_max=None, auth=None,
order_console_by_time=False, changecommentlink=None,
revlink=None, projects=None, repositories=None,
authz=None, logRotateLength=None, maxRotatedFiles=None,
change_hook_dialects = {}, provide_feeds=None, jinja_loaders=None,
change_hook_auth=None):
"""Run a web server that provides Buildbot status.
@type http_port: int or L{twisted.application.strports} string
@param http_port: a strports specification describing which port the
buildbot should use for its web server, with the
Waterfall display as the root page. For backwards
compatibility this can also be an int. Use
'tcp:8000' to listen on that port, or
'tcp:12345:interface=127.0.0.1' if you only want
local processes to connect to it (perhaps because
you are using an HTTP reverse proxy to make the
buildbot available to the outside world, and do not
want to make the raw port visible).
@type distrib_port: int or L{twisted.application.strports} string
@param distrib_port: Use this if you want to publish the Waterfall
page using web.distrib instead. The most common
case is to provide a string that is an absolute
pathname to the unix socket on which the
publisher should listen
(C{os.path.expanduser('~/.twistd-web-pb')} will
match the default settings of a standard
twisted.web 'personal web server'). Another
possibility is to pass an integer, which means
the publisher should listen on a TCP socket,
allowing the web server to be on a different
machine entirely. Both forms are provided for
backwards compatibility; the preferred form is a
strports specification like
'unix:/home/buildbot/.twistd-web-pb'. Providing
a non-absolute pathname will probably confuse
the strports parser.
@param allowForce: deprecated; use authz instead
@param auth: deprecated; use with authz
@param authz: a buildbot.status.web.authz.Authz instance giving the authorization
parameters for this view
@param public_html: the path to the public_html directory for this display,
either absolute or relative to the basedir. The default
is 'public_html', which selects BASEDIR/public_html.
@type site: None or L{twisted.web.server.Site}
@param site: Use this if you want to define your own object instead of
using the default.
@type numbuilds: int
@param numbuilds: Default number of entries in lists at the /one_line_per_build
and /builders/FOO URLs. This default can be overriden both programatically ---
by passing the equally named argument to constructors of OneLinePerBuildOneBuilder
and OneLinePerBuild --- and via the UI, by tacking ?numbuilds=xy onto the URL.
@type num_events: int
@param num_events: Default number of events to show in the waterfall.
@type num_events_max: int
@param num_events_max: The maximum number of events that are allowed to be
shown in the waterfall. The default value of C{None} will disable this
check
@type auth: a L{status.web.auth.IAuth} or C{None}
@param auth: an object that performs authentication to restrict access
to the C{allowForce} features. Ignored if C{allowForce}
is not C{True}. If C{auth} is C{None}, people can force or
stop builds without auth.
@type order_console_by_time: bool
@param order_console_by_time: Whether to order changes (commits) in the console
view according to the time they were created (for VCS like Git) or
according to their integer revision numbers (for VCS like SVN).
@type changecommentlink: callable, dict, tuple (2 or 3 strings) or C{None}
@param changecommentlink: adds links to ticket/bug ids in change comments,
see buildbot.status.web.base.changecommentlink for details
@type revlink: callable, dict, string or C{None}
@param revlink: decorates revision ids with links to a web-view,
see buildbot.status.web.base.revlink for details
@type projects: callable, dict or c{None}
@param projects: maps project identifiers to URLs, so that any project listed
is automatically decorated with a link to its front page.
see buildbot.status.web.base.dictlink for details
@type repositories: callable, dict or c{None}
@param repositories: maps repository identifiers to URLs, so that any project listed
is automatically decorated with a link to its web view.
see buildbot.status.web.base.dictlink for details
@type logRotateLength: None or int
@param logRotateLength: file size at which the http.log is rotated/reset.
If not set, the value set in the buildbot.tac will be used,
falling back to the BuildMaster's default value (1 Mb).
@type maxRotatedFiles: None or int
@param maxRotatedFiles: number of old http.log files to keep during log rotation.
If not set, the value set in the buildbot.tac will be used,
falling back to the BuildMaster's default value (10 files).
@type change_hook_dialects: None or dict
@param change_hook_dialects: If empty, disables change_hook support, otherwise
whitelists valid dialects. In the format of
{"dialect1": "Option1", "dialect2", None}
Where the values are options that will be passed
to the dialect
To enable the DEFAULT handler, use a key of DEFAULT
@type provide_feeds: None or list
@param provide_feeds: If empty, provides atom, json, and rss feeds.
Otherwise, a list of strings naming
the feeds to provide. Current
possibilities are "atom", "json", and "rss"
@type jinja_loaders: None or list
@param jinja_loaders: If not empty, a list of additional Jinja2 loader
objects to search for templates.
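For example, a minimal master.cfg fragment (a sketch only; the port and
authorization policy below are illustrative assumptions, not
recommendations):

    from buildbot.status.web.baseweb import WebStatus
    from buildbot.status.web.authz import Authz

    authz = Authz(default_action=True)   # hypothetical: allow all actions
    c['status'].append(WebStatus(http_port="tcp:8010", authz=authz))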
"""
service.MultiService.__init__(self)
if type(http_port) is int:
http_port = "tcp:%d" % http_port
self.http_port = http_port
if distrib_port is not None:
if type(distrib_port) is int:
distrib_port = "tcp:%d" % distrib_port
if distrib_port[0] in "/~.": # pathnames
distrib_port = "unix:%s" % distrib_port
self.distrib_port = distrib_port
self.num_events = num_events
if num_events_max:
if num_events_max < num_events:
config.error(
"num_events_max must be greater than num_events")
self.num_events_max = num_events_max
self.public_html = public_html
# make up an authz if allowForce was given
if authz:
if allowForce is not None:
config.error(
"cannot use both allowForce and authz parameters")
if auth:
config.error(
"cannot use both auth and authz parameters (pass " +
"auth as an Authz parameter)")
else:
# invent an authz
if allowForce and auth:
authz = Authz(auth=auth, default_action="auth")
elif allowForce:
authz = Authz(default_action=True)
else:
if auth:
log.msg("Warning: Ignoring authentication. Search for 'authorization'"
" in the manual")
authz = Authz() # no authorization for anything
self.authz = authz
# check for correctness of HTTP auth parameters
if change_hook_auth is not None:
self.change_hook_auth = []
for checker in change_hook_auth:
if isinstance(checker, str):
try:
checker = strcred.makeChecker(checker)
except Exception, error:
config.error("Invalid change_hook checker description: %s" % (error,))
continue
elif not ICredentialsChecker.providedBy(checker):
config.error("change_hook checker doesn't provide ICredentialChecker: %r" % (checker,))
continue
if IUsernamePassword not in checker.credentialInterfaces:
config.error("change_hook checker doesn't support IUsernamePassword: %r" % (checker,))
continue
self.change_hook_auth.append(checker)
else:
self.change_hook_auth = None
self.orderConsoleByTime = order_console_by_time
# If we were given a site object, go ahead and use it. (if not, we add one later)
self.site = site
# keep track of our child services
self.http_svc = None
self.distrib_svc = None
# store the log settings until we create the site object
self.logRotateLength = logRotateLength
self.maxRotatedFiles = maxRotatedFiles
# create the web site page structure
self.childrenToBeAdded = {}
self.setupUsualPages(numbuilds=numbuilds, num_events=num_events,
num_events_max=num_events_max)
self.revlink = revlink
self.changecommentlink = changecommentlink
self.repositories = repositories
self.projects = projects
# keep track of cached connections so we can break them when we shut
# down. See ticket #102 for more details.
self.channels = weakref.WeakKeyDictionary()
# do we want to allow change_hook
self.change_hook_dialects = {}
if change_hook_dialects:
self.change_hook_dialects = change_hook_dialects
resource_obj = ChangeHookResource(dialects=self.change_hook_dialects)
if self.change_hook_auth is not None:
resource_obj = self.setupProtectedResource(
resource_obj, self.change_hook_auth)
self.putChild("change_hook", resource_obj)
# Set default feeds
if provide_feeds is None:
self.provide_feeds = ["atom", "json", "rss"]
else:
self.provide_feeds = provide_feeds
self.jinja_loaders = jinja_loaders
def setupProtectedResource(self, resource_obj, checkers):
class SimpleRealm(object):
"""
A realm which gives out L{ChangeHookResource} instances for authenticated
users.
"""
implements(IRealm)
def requestAvatar(self, avatarId, mind, *interfaces):
if resource.IResource in interfaces:
return (resource.IResource, resource_obj, lambda: None)
raise NotImplementedError()
portal = Portal(SimpleRealm(), checkers)
credentialFactory = guard.BasicCredentialFactory('Protected area')
wrapper = guard.HTTPAuthSessionWrapper(portal, [credentialFactory])
return wrapper
def setupUsualPages(self, numbuilds, num_events, num_events_max):
#self.putChild("", IndexOrWaterfallRedirection())
self.putChild("waterfall", WaterfallStatusResource(num_events=num_events,
num_events_max=num_events_max))
self.putChild("grid", GridStatusResource())
self.putChild("console", ConsoleStatusResource(
orderByTime=self.orderConsoleByTime))
self.putChild("tgrid", TransposedGridStatusResource())
self.putChild("builders", BuildersResource(numbuilds=numbuilds)) # has builds/steps/logs
self.putChild("one_box_per_builder", Redirect("builders"))
self.putChild("changes", ChangesResource())
self.putChild("buildslaves", BuildSlavesResource())
self.putChild("buildstatus", BuildStatusStatusResource())
self.putChild("one_line_per_build",
OneLinePerBuild(numbuilds=numbuilds))
self.putChild("about", AboutBuildbot())
self.putChild("authfail", AuthFailResource())
self.putChild("authzfail", AuthzFailResource())
self.putChild("users", UsersResource())
self.putChild("login", LoginResource())
self.putChild("logout", LogoutResource())
def __repr__(self):
if self.http_port is None:
return "<WebStatus on path %s at %s>" % (self.distrib_port,
hex(id(self)))
if self.distrib_port is None:
return "<WebStatus on port %s at %s>" % (self.http_port,
hex(id(self)))
return ("<WebStatus on port %s and path %s at %s>" %
(self.http_port, self.distrib_port, hex(id(self))))
def setServiceParent(self, parent):
# this class keeps a *separate* link to the buildmaster, rather than
# just using self.parent, so that when we are "disowned" (and thus
# parent=None), any remaining HTTP clients of this WebStatus will still
# be able to get reasonable results.
self.master = parent.master
# set master in IAuth instance
if self.authz.auth:
self.authz.auth.master = self.master
def either(a,b): # a if a else b for py2.4
if a:
return a
else:
return b
rotateLength = either(self.logRotateLength, self.master.log_rotation.rotateLength)
maxRotatedFiles = either(self.maxRotatedFiles, self.master.log_rotation.maxRotatedFiles)
# Set up the jinja templating engine.
if self.revlink:
revlink = self.revlink
else:
revlink = self.master.config.revlink
self.templates = createJinjaEnv(revlink, self.changecommentlink,
self.repositories, self.projects, self.jinja_loaders)
if not self.site:
class RotateLogSite(server.Site):
def _openLogFile(self, path):
try:
from twisted.python.logfile import LogFile
log.msg("Setting up http.log rotating %s files of %s bytes each" %
(maxRotatedFiles, rotateLength))
if hasattr(LogFile, "fromFullPath"): # not present in Twisted-2.5.0
return LogFile.fromFullPath(path, rotateLength=rotateLength, maxRotatedFiles=maxRotatedFiles)
else:
log.msg("WebStatus: rotated http logs are not supported on this version of Twisted")
except ImportError, e:
log.msg("WebStatus: Unable to set up rotating http.log: %s" % e)
# if all else fails, just call the parent method
return server.Site._openLogFile(self, path)
# this will be replaced once we've been attached to a parent (and
# thus have a basedir and can reference BASEDIR)
root = static.Data("placeholder", "text/plain")
httplog = os.path.abspath(os.path.join(self.master.basedir, "http.log"))
self.site = RotateLogSite(root, logPath=httplog)
# the following items are accessed by HtmlResource when it renders
# each page.
self.site.buildbot_service = self
if self.http_port is not None:
self.http_svc = s = strports.service(self.http_port, self.site)
s.setServiceParent(self)
if self.distrib_port is not None:
f = pb.PBServerFactory(distrib.ResourcePublisher(self.site))
self.distrib_svc = s = strports.service(self.distrib_port, f)
s.setServiceParent(self)
self.setupSite()
service.MultiService.setServiceParent(self, parent)
def setupSite(self):
# this is responsible for creating the root resource. It isn't done
# at __init__ time because we need to reference the parent's basedir.
htmldir = os.path.abspath(os.path.join(self.master.basedir, self.public_html))
if os.path.isdir(htmldir):
log.msg("WebStatus using (%s)" % htmldir)
else:
log.msg("WebStatus: warning: %s is missing. Do you need to run"
" 'buildbot upgrade-master' on this buildmaster?" % htmldir)
# all static pages will get a 404 until upgrade-master is used to
# populate this directory. Create the directory, though, since
# otherwise we get internal server errors instead of 404s.
os.mkdir(htmldir)
root = StaticFile(htmldir)
root_page = RootPage()
root.putChild("", root_page)
root.putChild("shutdown", root_page)
root.putChild("cancel_shutdown", root_page)
for name, child_resource in self.childrenToBeAdded.iteritems():
root.putChild(name, child_resource)
status = self.getStatus()
if "rss" in self.provide_feeds:
root.putChild("rss", Rss20StatusResource(status))
if "atom" in self.provide_feeds:
root.putChild("atom", Atom10StatusResource(status))
if "json" in self.provide_feeds:
root.putChild("json", JsonStatusResource(status))
self.site.resource = root
def putChild(self, name, child_resource):
"""This behaves a lot like root.putChild() . """
self.childrenToBeAdded[name] = child_resource
def registerChannel(self, channel):
self.channels[channel] = 1 # weakrefs
@defer.inlineCallbacks
def stopService(self):
for channel in self.channels:
try:
channel.transport.loseConnection()
except:
log.msg("WebStatus.stopService: error while disconnecting"
" leftover clients")
log.err()
yield service.MultiService.stopService(self)
# having shut them down, now remove our child services so they don't
# start up again if we're re-started
if self.http_svc:
yield self.http_svc.disownServiceParent()
self.http_svc = None
if self.distrib_svc:
yield self.distrib_svc.disownServiceParent()
self.distrib_svc = None
def getStatus(self):
return self.master.getStatus()
def getChangeSvc(self):
return self.master.change_svc
def getPortnum(self):
# this is for the benefit of unit tests
s = list(self)[0]
return s._port.getHost().port
# What happened to getControl?!
#
# instead of passing control objects all over the place in the web
# code, at the few places where a control instance is required we
# find the requisite object manually, starting at the buildmaster.
# This is in preparation for removal of the IControl hierarchy
# entirely.
def checkConfig(self, otherStatusReceivers):
duplicate_webstatus=0
for osr in otherStatusReceivers:
if isinstance(osr,WebStatus):
if osr is self:
continue
# compare against myself and complain if the settings conflict
if self.http_port == osr.http_port:
if duplicate_webstatus == 0:
duplicate_webstatus = 2
else:
duplicate_webstatus += 1
if duplicate_webstatus:
config.error(
"%d Webstatus objects have same port: %s"
% (duplicate_webstatus, self.http_port),
)
# resources can get access to the IStatus by calling
# request.site.buildbot_service.getStatus()
|
popazerty/SDG-e2
|
refs/heads/master
|
lib/python/Screens/TimeDateInput.py
|
72
|
from Screen import Screen
from Components.config import ConfigClock, ConfigDateTime, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Pixmap import Pixmap
import time
import datetime
class TimeDateInput(Screen, ConfigListScreen):
def __init__(self, session, config_time=None, config_date=None):
Screen.__init__(self, session)
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig(config_date, config_time)
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list)
self.createSetup(self["config"])
def createConfig(self, conf_date, conf_time):
self.save_mask = 0
if conf_time:
self.save_mask |= 1
else:
conf_time = ConfigClock(default = time.time())
if conf_date:
self.save_mask |= 2
else:
conf_date = ConfigDateTime(default = time.time(), formatstring = _("%d.%B %Y"), increment = 86400)
self.timeinput_date = conf_date
self.timeinput_time = conf_time
def createSetup(self, configlist):
self.list = [
getConfigListEntry(_("Date"), self.timeinput_date),
getConfigListEntry(_("Time"), self.timeinput_time)
]
configlist.list = self.list
configlist.l.setList(self.list)
def keySelect(self):
self.keyGo()
def getTimestamp(self, date, mytime):
d = time.localtime(date)
dt = datetime.datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(time.mktime(dt.timetuple()))
def keyGo(self):
time = self.getTimestamp(self.timeinput_date.value, self.timeinput_time.value)
if self.save_mask & 1:
self.timeinput_time.save()
if self.save_mask & 2:
self.timeinput_date.save()
self.close((True, time))
def keyCancel(self):
if self.save_mask & 1:
self.timeinput_time.cancel()
if self.save_mask & 2:
self.timeinput_date.cancel()
self.close((False,))
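# Illustrative usage from another Screen (a sketch only; the callback name
# and config element are assumptions):
#
#     self.session.openWithCallback(self.timeInputClosed, TimeDateInput, config.myservice.time)
#     # timeInputClosed(result) receives (True, timestamp) on save and (False,) on cancel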
|
MediaKraken/MediaKraken_Deployment
|
refs/heads/master
|
source/common/common_serial.py
|
1
|
"""
Copyright (C) 2015 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import time
from kivy.utils import platform
# import the pyserial library for use in rs232c communications
if platform != 'android':
import serial
class CommonSerial:
"""
Class for interfacing via serial devices
"""
def __init__(self, dev_port='/dev/ttyUSB1', dev_baudrate=9600,
dev_parity=serial.PARITY_ODD,
dev_stopbits=serial.STOPBITS_TWO,
dev_bytesize=serial.SEVENBITS):
"""
Open serial device for read/write
"""
self.serial_device = serial.Serial(port=dev_port,
baudrate=dev_baudrate,
parity=dev_parity,
stopbits=dev_stopbits,
bytesize=dev_bytesize
)
# pyserial opens the port automatically when a port name is supplied,
# so only call open() explicitly if the port is not already open
if not self.serial_device.isOpen():
    self.serial_device.open()
def com_serial_read_device(self):
"""
Read data from serial device
"""
time.sleep(1)
read_data = ''
while self.serial_device.inWaiting() > 0:
read_data += self.serial_device.read(1)
return read_data
def com_serial_close_device(self):
"""
Close serial device
"""
self.serial_device.close()
def com_serial_write_device(self, message):
"""
Send data to serial device
"""
self.serial_device.write(message)
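# Illustrative usage (a sketch only; the device path, baud rate and message
# below are assumptions, not requirements of this class):
#
#     dev = CommonSerial(dev_port='/dev/ttyUSB0', dev_baudrate=9600)
#     dev.com_serial_write_device('PING\r\n')
#     print(dev.com_serial_read_device())
#     dev.com_serial_close_device()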
|
klahnakoski/cloc
|
refs/heads/master
|
cloc/util/vendor/dateutil/tzwin.py
|
227
|
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import winreg
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
global TZKEYNAME
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
_settzkeyname()
class tzwinbase(datetime.tzinfo):
"""tzinfo class based on win32's timezones available in the registry."""
def utcoffset(self, dt):
if self._isdst(dt):
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
def dst(self, dt):
if self._isdst(dt):
minutes = self._dstoffset - self._stdoffset
return datetime.timedelta(minutes=minutes)
else:
return datetime.timedelta(0)
def tzname(self, dt):
if self._isdst(dt):
return self._dstname
else:
return self._stdname
def list():
"""Return a list of all time zones known to the system."""
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, TZKEYNAME)
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
list = staticmethod(list)
def display(self):
return self._display
def _isdst(self, dt):
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
if dston < dstoff:
return dston <= dt.replace(tzinfo=None) < dstoff
else:
return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, r"%s\%s" % (TZKEYNAME, name))
keydict = valuestodict(tzkey)
tzkey.Close()
handle.Close()
self._stdname = keydict["Std"].encode("iso-8859-1")
self._dstname = keydict["Dlt"].encode("iso-8859-1")
self._display = keydict["Display"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
def __init__(self):
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzlocalkey = winreg.OpenKey(handle, TZLOCALKEYNAME)
keydict = valuestodict(tzlocalkey)
tzlocalkey.Close()
self._stdname = keydict["StandardName"].encode("iso-8859-1")
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
tzkey = winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
tzkey.Close()
except OSError:
self._display = None
handle.Close()
self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:6]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:6]
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
for n in range(whichweek):
dt = weekdayone+(whichweek-n)*ONEWEEK
if dt.month == month:
return dt
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dict = {}
size = winreg.QueryInfoKey(key)[1]
for i in range(size):
data = winreg.EnumValue(key, i)
dict[data[0]] = data[1]
return dict
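# Minimal usage sketch (Windows only, since these classes read the registry).
# "Pacific Standard Time" is assumed to be one of the registry zone names;
# tzwin.list() shows what is actually available on the machine.
if __name__ == '__main__':
    print(tzwin.list()[:5])
    tz = tzwin("Pacific Standard Time")
    print(datetime.datetime(2014, 7, 1, 12, 0, tzinfo=tz).utcoffset())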
|
payal97/portal
|
refs/heads/develop
|
systers_portal/community/urls.py
|
2
|
from django.conf.urls import url
from .views import (CommunityLandingView, EditCommunityProfileView,
ViewCommunityProfileView, CommunityPageView,
AddCommunityPageView, EditCommunityPageView,
DeleteCommunityPageView, CommunityUsersView,
UserPermissionGroupsView, RequestCommunityView,
NewCommunityRequestsListView, ApproveRequestCommunityView,
RejectRequestCommunityView, ViewCommunityRequestView,
EditCommunityRequestView, AddCommunityView, CommunitySearch)
urlpatterns = [
url(r'add_community/$', AddCommunityView.as_view(),
name='add_community'),
url(r'search/$', CommunitySearch.as_view(), name="search"),
url(r'request_community/$', RequestCommunityView.as_view(),
name='request_community'),
url(r'community_requests', NewCommunityRequestsListView.as_view(),
name='unapproved_community_requests'),
url(r'^(?P<slug>[\w-]+)/edit_request/$', EditCommunityRequestView.as_view(),
name='edit_community_request'),
url(r'^(?P<slug>[\w-]+)/view_request/$', ViewCommunityRequestView.as_view(),
name='view_community_request'),
url(r'^(?P<slug>[\w-]+)/approve/$', ApproveRequestCommunityView.as_view(),
name='approve_community_request'),
url(r'(?P<slug>[\w-]+)/reject/$', RejectRequestCommunityView.as_view(),
name='reject_community_request'),
url(r'^(?P<slug>[\w-]+)/$', CommunityLandingView.as_view(),
name='view_community_landing'),
url(r'^(?P<slug>[\w-]+)/profile/$', ViewCommunityProfileView.as_view(),
name='view_community_profile'),
url(r'^(?P<slug>[\w-]+)/profile/edit/$',
EditCommunityProfileView.as_view(), name='edit_community_profile'),
url(r'^(?P<slug>[\w-]+)/p/add/$', AddCommunityPageView.as_view(),
name="add_community_page"),
url(r'^(?P<slug>[\w-]+)/p/(?P<page_slug>[\w-]+)/edit/$',
EditCommunityPageView.as_view(), name="edit_community_page"),
url(r'^(?P<slug>[\w-]+)/p/(?P<page_slug>[\w-]+)/delete/$',
DeleteCommunityPageView.as_view(), name="delete_community_page"),
url(r'^(?P<slug>[\w-]+)/p/(?P<page_slug>[\w-]+)/$',
CommunityPageView.as_view(), name="view_community_page"),
url(r'^(?P<slug>[\w-]+)/users/$', CommunityUsersView.as_view(),
name="community_users"),
url(r'^(?P<slug>[\w-]+)/user/(?P<username>[\w.@+-]+)/permissions/$',
UserPermissionGroupsView.as_view(), name="user_permission_groups"),
]
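# Usage sketch: once this module is wired into the project's ROOT_URLCONF, the
# named patterns above can be reversed (the slug below is hypothetical, and the
# resulting path also depends on the include() prefix the project uses):
#     from django.core.urlresolvers import reverse
#     reverse('view_community_profile', kwargs={'slug': 'systers'})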
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/encodings/utf_16_le.py
|
860
|
""" Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
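# Round-trip sketch exercising the codec functions defined above; it only
# relies on the codecs module already imported in this file.
if __name__ == '__main__':
    data, length = encode(u'caf\xe9')
    text, consumed = decode(data)
    assert text == u'caf\xe9'
    print(repr(data))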
|
knifenomad/django
|
refs/heads/master
|
django/contrib/gis/db/backends/mysql/operations.py
|
328
|
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
select = 'AsText(%s)'
from_wkb = 'GeomFromWKB'
from_text = 'GeomFromText'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # .. ..
'contained': SpatialOperator(func='MBRWithin'), # .. ..
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func='MBREqual'),
'exact': SpatialOperator(func='MBREqual'),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func='MBREqual'),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
function_names = {
'Distance': 'ST_Distance',
'Length': 'GLength',
'Union': 'ST_Union',
}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'Difference', 'ForceRHR', 'GeoHash', 'Intersection', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'SymDifference', 'Transform', 'Translate',
}
if self.connection.mysql_version < (5, 6, 1):
unsupported.update({'Distance', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
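# Illustrative sketch (not runnable without a configured Django project): with
# the operator table above, an ORM filter on a hypothetical City model such as
#     City.objects.filter(point__contains=geom)
# compiles to SQL of the form
#     MBRContains(`city`.`point`, GeomFromText(%s))
# where the GeomFromText(%s) placeholder comes from get_geom_placeholder().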
|
stefan-andritoiu/upm
|
refs/heads/master
|
examples/python/light.py
|
6
|
#!/usr/bin/env python
# Author: Sarah Knepper <sarah.knepper@intel.com>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time
from upm import pyupm_light as lightObj
def main():
# Create the light sensor object using AIO pin 0
sensor = lightObj.Light(0)
# Read the input and print both the normalized ADC value and a
# rough lux value, waiting one second between readings
while 1:
print(sensor.name() + " normalized value is %f" % sensor.getNormalized()
+ ", which is roughly %d" % sensor.value() + " lux");
time.sleep(1)
    # Delete the light sensor object
    del sensor
if __name__ == '__main__':
main()
|
sestrella/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/ucs/ucs_vlans.py
|
64
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: ucs_vlans
short_description: Configures VLANs on Cisco UCS Manager
description:
- Configures VLANs on Cisco UCS Manager.
- Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(present), will verify VLANs are present and will create if needed.
- If C(absent), will verify VLANs are absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name assigned to the VLAN.
- The VLAN name is case sensitive.
- This name can be between 1 and 32 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the VLAN is created.
required: yes
multicast_policy:
description:
- The multicast policy associated with this VLAN.
- This option is only valid if the Sharing Type field is set to None or Primary.
default: ''
fabric:
description:
- "The fabric configuration of the VLAN. This can be one of the following:"
- "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases."
- "A — The VLAN only applies to fabric A."
- "B — The VLAN only applies to fabric B."
- For upstream disjoint L2 networks, Cisco recommends that you choose common to create VLANs that apply to both fabrics.
choices: [common, A, B]
default: common
id:
description:
- The unique string identifier assigned to the VLAN.
- A VLAN ID can be between '1' and '3967', or between '4048' and '4093'.
- You cannot create VLANs with IDs from 4030 to 4047. This range of VLAN IDs is reserved.
- The VLAN IDs you specify must also be supported on the switch that you are using.
- VLANs in the LAN cloud and FCoE VLANs in the SAN cloud must have different IDs.
- Optional if state is absent.
required: yes
sharing:
description:
- The Sharing Type field.
- "Whether this VLAN is subdivided into private or secondary VLANs. This can be one of the following:"
- "none - This VLAN does not have any secondary or private VLANs. This is a regular VLAN."
- "primary - This VLAN can have one or more secondary VLANs, as shown in the Secondary VLANs area. This VLAN is a primary VLAN in the private VLAN domain."
- "isolated - This is a private VLAN associated with a primary VLAN. This VLAN is an Isolated VLAN."
- "community - This VLAN can communicate with other ports on the same community VLAN as well as the promiscuous port. This VLAN is a Community VLAN."
choices: [none, primary, isolated, community]
default: none
native:
description:
- Designates the VLAN as a native VLAN.
choices: ['yes', 'no']
default: 'no'
requirements:
- ucsmsdk
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure VLAN
ucs_vlans:
hostname: 172.16.143.150
username: admin
password: password
name: vlan2
id: '2'
native: 'yes'
- name: Remove VLAN
ucs_vlans:
hostname: 172.16.143.150
username: admin
password: password
name: vlan2
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
argument_spec = ucs_argument_spec
argument_spec.update(
name=dict(type='str', required=True),
multicast_policy=dict(type='str', default=''),
fabric=dict(type='str', default='common', choices=['common', 'A', 'B']),
id=dict(type='str'),
sharing=dict(type='str', default='none', choices=['none', 'primary', 'isolated', 'community']),
native=dict(type='str', default='no', choices=['yes', 'no']),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
['state', 'present', ['id']],
],
)
ucs = UCSModule(module)
err = False
# UCSModule creation above verifies ucsmsdk is present and exits on failure, so additional imports are done below.
from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan
changed = False
try:
mo_exists = False
props_match = False
# dn is fabric/lan/net-<name> for common vlans or fabric/lan/[A or B]/net-<name> for A or B
dn_base = 'fabric/lan'
if module.params['fabric'] != 'common':
dn_base += '/' + module.params['fabric']
dn = dn_base + '/net-' + module.params['name']
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
# mo must exist but all properties do not have to match
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
if mo_exists:
# check top-level mo props
kwargs = dict(id=module.params['id'])
kwargs['default_net'] = module.params['native']
kwargs['sharing'] = module.params['sharing']
kwargs['mcast_policy_name'] = module.params['multicast_policy']
if (mo.check_prop_match(**kwargs)):
props_match = True
if not props_match:
if not module.check_mode:
                        # create the mo if it does not exist, otherwise update the existing one in place
mo = FabricVlan(
parent_mo_or_dn=dn_base,
name=module.params['name'],
id=module.params['id'],
default_net=module.params['native'],
sharing=module.params['sharing'],
mcast_policy_name=module.params['multicast_policy'],
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
|
Fab7c4/paparazzi
|
refs/heads/highwind
|
select_conf.py
|
1
|
start.py
|
adlnet-archive/edx-platform
|
refs/heads/master
|
common/djangoapps/edxmako/template.py
|
20
|
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from mako.template import Template as MakoTemplate
from edxmako.shortcuts import marketing_link
import edxmako
import edxmako.middleware
DJANGO_VARIABLES = ['output_encoding', 'encoding_errors']
# TODO: We should make this a Django Template subclass that simply has the MakoTemplate inside of it? (Instead of inheriting from MakoTemplate)
class Template(MakoTemplate):
"""
    This bridges the gap between a Mako template and a Django template. It can
    be rendered as if it were a Django template because the arguments are
    transformed in a way that MakoTemplate can understand.
"""
def __init__(self, *args, **kwargs):
"""Overrides base __init__ to provide django variable overrides"""
if not kwargs.get('no_django', False):
overrides = {k: getattr(edxmako, k, None) for k in DJANGO_VARIABLES}
overrides['lookup'] = edxmako.LOOKUP['main']
kwargs.update(overrides)
super(Template, self).__init__(*args, **kwargs)
def render(self, context_instance):
"""
This takes a render call with a context (from Django) and translates
it to a render call on the mako template.
"""
# collapse context_instance to a single dictionary for mako
context_dictionary = {}
# In various testing contexts, there might not be a current request context.
if getattr(edxmako.middleware.REQUEST_CONTEXT, "context", None):
for d in edxmako.middleware.REQUEST_CONTEXT.context:
context_dictionary.update(d)
for d in context_instance:
context_dictionary.update(d)
context_dictionary['settings'] = settings
context_dictionary['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
context_dictionary['django_context'] = context_instance
context_dictionary['marketing_link'] = marketing_link
return super(Template, self).render_unicode(**context_dictionary)
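# Standalone sketch of the context-collapsing pattern used in render() above:
# a Django RequestContext behaves like a stack of dicts, and later dicts win
# when the stack is flattened into the single dictionary Mako expects.
# (Pure illustration; the sample data is hypothetical and Django is not needed.)
if __name__ == '__main__':
    context_stack = [{'user': 'anon', 'page': 'home'}, {'user': 'alice'}]
    flat = {}
    for d in context_stack:
        flat.update(d)
    print(flat)  # {'user': 'alice', 'page': 'home'}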
|
thaim/ansible
|
refs/heads/fix-broken-link
|
lib/ansible/modules/cloud/azure/azure_rm_rediscache.py
|
18
|
#!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_rediscache
version_added: "2.8"
short_description: Manage Azure Cache for Redis instance
description:
- Create, update and delete instance of Azure Cache for Redis.
options:
resource_group:
description:
- Name of the resource group to which the resource belongs.
required: True
name:
description:
- Unique name of the Azure Cache for Redis to create or update.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
sku:
description:
- SKU info of Azure Cache for Redis.
suboptions:
name:
description:
- Type of Azure Cache for Redis to deploy.
choices:
- basic
- standard
- premium
required: True
size:
description:
- Size of Azure Cache for Redis to deploy.
- When I(sku=basic) or I(sku=standard), allowed values are C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6).
- When I(sku=premium), allowed values are C(P1), C(P2), C(P3), C(P4).
- Please see U(https://docs.microsoft.com/en-us/rest/api/redis/redis/create#sku) for allowed values.
choices:
- C0
- C1
- C2
- C3
- C4
- C5
- C6
- P1
- P2
- P3
- P4
required: True
enable_non_ssl_port:
description:
- When set I(enable_non_ssl_port=true), the non-ssl Redis server port 6379 will be enabled.
type: bool
default: false
maxfragmentationmemory_reserved:
description:
- Configures the amount of memory in MB that is reserved to accommodate for memory fragmentation.
- Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
maxmemory_reserved:
description:
- Configures the amount of memory in MB that is reserved for non-cache operations.
- Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
maxmemory_policy:
description:
- Configures the eviction policy of the cache.
- Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
choices:
- volatile_lru
- allkeys_lru
- volatile_random
- allkeys_random
- volatile_ttl
- noeviction
notify_keyspace_events:
description:
- Allows clients to receive notifications when certain events occur.
- Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
type: str
shard_count:
description:
- The number of shards to be created when I(sku=premium).
type: int
static_ip:
description:
- Static IP address. Required when deploying an Azure Cache for Redis inside an existing Azure virtual network.
subnet:
description:
- Subnet in a virtual network to deploy the Azure Cache for Redis in.
- It can be resource id of subnet, for example
/subscriptions/{subid}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
- It can be a dictionary where contains I(name), I(virtual_network_name) and I(resource_group).
- I(name). Name of the subnet.
- I(resource_group). Resource group name of the subnet.
- I(virtual_network_name). Name of virtual network to which this subnet belongs.
tenant_settings:
description:
- Dict of tenant settings.
type: dict
reboot:
description:
- Reboot specified Redis node(s). There can be potential data loss.
suboptions:
shard_id:
description:
- If clustering is enabled, the id of the shard to be rebooted.
type: int
reboot_type:
description:
- Which Redis node(s) to reboot.
choices:
- primary
- secondary
- all
default: all
regenerate_key:
description:
- Regenerate Redis cache's access keys.
suboptions:
key_type:
description:
- The Redis key to regenerate.
choices:
- primary
- secondary
wait_for_provisioning:
description:
- Wait till the Azure Cache for Redis instance provisioning_state is Succeeded.
- It takes several minutes for Azure Cache for Redis to be provisioned ready for use after creating/updating/rebooting.
- Set this option to C(true) to wait for provisioning_state. Set to C(false) if you don't care about provisioning_state.
- Poll wait timeout is 60 minutes.
type: bool
default: True
state:
description:
- Assert the state of the Azure Cache for Redis.
- Use C(present) to create or update an Azure Cache for Redis and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yunge Zhu(@yungezz)
'''
EXAMPLES = '''
- name: Create an Azure Cache for Redis
azure_rm_rediscache:
resource_group: myResourceGroup
name: myRedis
sku:
name: basic
size: C1
- name: Scale up the Azure Cache for Redis
azure_rm_rediscache:
resource_group: myResourceGroup
name: myRedis
sku:
name: standard
size: C1
tags:
testing: foo
- name: Force reboot the redis cache
azure_rm_rediscache:
resource_group: myResourceGroup
name: myRedisCache
reboot:
reboot_type: all
- name: Create Azure Cache for Redis with subnet
azure_rm_rediscache:
resource_group: myResourceGroup
name: myRedis
sku:
name: premium
size: P1
subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirt
ualNetwork/subnets/mySubnet"
'''
RETURN = '''
id:
description:
- Id of the Azure Cache for Redis.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis"
host_name:
description:
- Host name of the Azure Cache for Redis.
returned: when I(state=present)
type: str
sample: "myredis.redis.cache.windows.net"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.redis import RedisManagementClient
from azure.mgmt.redis.models import (RedisCreateParameters, RedisUpdateParameters, Sku)
except ImportError:
# This is handled in azure_rm_common
pass
sku_spec = dict(
name=dict(
type='str',
choices=['basic', 'standard', 'premium']),
size=dict(
type='str',
choices=['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'P1', 'P2', 'P3', 'P4']
)
)
reboot_spec = dict(
shard_id=dict(
type='str'
),
reboot_type=dict(
type='str',
choices=['primary', 'secondary', 'all']
)
)
regenerate_key_spec = dict(
key_type=dict(
type='str',
choices=['primary', 'secondary']
)
)
def rediscache_to_dict(redis):
result = dict(
id=redis.id,
name=redis.name,
location=redis.location,
sku=dict(
name=redis.sku.name.lower(),
size=redis.sku.family + str(redis.sku.capacity)
),
enable_non_ssl_port=redis.enable_non_ssl_port,
host_name=redis.host_name,
shard_count=redis.shard_count,
subnet=redis.subnet_id,
static_ip=redis.static_ip,
provisioning_state=redis.provisioning_state,
tenant_settings=redis.tenant_settings,
tags=redis.tags if redis.tags else None
)
for key in redis.redis_configuration:
result[hyphen_to_underline(key)] = hyphen_to_underline(redis.redis_configuration.get(key, None))
return result
def hyphen_to_underline(input):
if input and isinstance(input, str):
return input.replace("-", "_")
return input
def underline_to_hyphen(input):
if input and isinstance(input, str):
return input.replace("_", "-")
return input
def get_reboot_type(type):
if type == "primary":
return "PrimaryNode"
if type == "secondary":
return "SecondaryNode"
if type == "all":
return "AllNodes"
return type
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMRedisCaches(AzureRMModuleBase):
"""Configuration class for an Azure RM Cache for Redis resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
sku=dict(
type='dict',
options=sku_spec
),
enable_non_ssl_port=dict(
type='bool',
default=False
),
maxfragmentationmemory_reserved=dict(
type='int'
),
maxmemory_reserved=dict(
type='int'
),
maxmemory_policy=dict(
type='str',
choices=[
"volatile_lru",
"allkeys_lru",
"volatile_random",
"allkeys_random",
"volatile_ttl",
"noeviction"
]
),
notify_keyspace_events=dict(
type='str'
),
shard_count=dict(
type='int'
),
static_ip=dict(
type='str'
),
subnet=dict(
type='raw'
),
tenant_settings=dict(
type='dict'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
reboot=dict(
type='dict',
options=reboot_spec
),
regenerate_key=dict(
type='dict',
options=regenerate_key_spec
),
wait_for_provisioning=dict(
type='bool',
                default=True
)
)
self._client = None
self.resource_group = None
self.name = None
self.location = None
self.sku = None
self.size = None
self.enable_non_ssl_port = False
self.configuration_file_path = None
self.shard_count = None
self.static_ip = None
self.subnet = None
self.tenant_settings = None
self.reboot = None
self.regenerate_key = None
self.wait_for_provisioning = None
self.wait_for_provisioning_polling_interval_in_seconds = 30
self.wait_for_provisioning_polling_times = 120
self.tags = None
self.results = dict(
changed=False,
id=None,
host_name=None
)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMRedisCaches, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
old_response = None
response = None
to_be_updated = False
# define redis_configuration properties
self.redis_configuration_properties = ["maxfragmentationmemory_reserved",
"maxmemory_reserved",
"maxmemory_policy",
"notify_keyspace_events"]
# get management client
self._client = self.get_mgmt_svc_client(RedisManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2018-03-01')
# set location
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
# check subnet exists
if self.subnet:
self.subnet = self.parse_subnet()
# get existing Azure Cache for Redis
old_response = self.get_rediscache()
if old_response:
self.results['id'] = old_response['id']
if self.state == 'present':
# if redis not exists
if not old_response:
self.log("Azure Cache for Redis instance doesn't exist")
to_be_updated = True
self.to_do = Actions.Create
if not self.sku:
self.fail("Please specify sku to creating new Azure Cache for Redis.")
else:
# redis exists already, do update
self.log("Azure Cache for Redis instance already exists")
update_tags, self.tags = self.update_tags(old_response.get('tags', None))
if update_tags:
to_be_updated = True
self.to_do = Actions.Update
# check if update
if self.check_update(old_response):
to_be_updated = True
self.to_do = Actions.Update
elif self.state == 'absent':
if old_response:
self.log("Delete Azure Cache for Redis instance")
self.results['id'] = old_response['id']
to_be_updated = True
self.to_do = Actions.Delete
else:
self.results['changed'] = False
self.log("Azure Cache for Redis {0} not exists.".format(self.name))
if to_be_updated:
self.log('Need to Create/Update Azure Cache for Redis')
self.results['changed'] = True
if self.check_mode:
return self.results
if self.to_do == Actions.Create:
response = self.create_rediscache()
self.results['id'] = response['id']
self.results['host_name'] = response['host_name']
if self.to_do == Actions.Update:
response = self.update_rediscache()
self.results['id'] = response['id']
self.results['host_name'] = response['host_name']
if self.to_do == Actions.Delete:
self.delete_rediscache()
self.log('Azure Cache for Redis instance deleted')
if self.reboot:
self.reboot['reboot_type'] = get_reboot_type(self.reboot['reboot_type'])
self.force_reboot_rediscache()
if self.regenerate_key:
            response = self.regenerate_rediscache_key()
self.results['keys'] = response
return self.results
def check_update(self, existing):
if self.enable_non_ssl_port is not None and existing['enable_non_ssl_port'] != self.enable_non_ssl_port:
self.log("enable_non_ssl_port diff: origin {0} / update {1}".format(existing['enable_non_ssl_port'], self.enable_non_ssl_port))
return True
if self.sku is not None:
if existing['sku']['name'] != self.sku['name']:
self.log("sku diff: origin {0} / update {1}".format(existing['sku']['name'], self.sku['name']))
return True
if existing['sku']['size'] != self.sku['size']:
self.log("size diff: origin {0} / update {1}".format(existing['sku']['size'], self.sku['size']))
return True
if self.tenant_settings is not None and existing['tenant_settings'] != self.tenant_settings:
self.log("tenant_settings diff: origin {0} / update {1}".format(existing['tenant_settings'], self.tenant_settings))
return True
if self.shard_count is not None and existing['shard_count'] != self.shard_count:
self.log("shard_count diff: origin {0} / update {1}".format(existing['shard_count'], self.shard_count))
return True
if self.subnet is not None and existing['subnet'] != self.subnet:
self.log("subnet diff: origin {0} / update {1}".format(existing['subnet'], self.subnet))
return True
if self.static_ip is not None and existing['static_ip'] != self.static_ip:
self.log("static_ip diff: origin {0} / update {1}".format(existing['static_ip'], self.static_ip))
return True
for config in self.redis_configuration_properties:
if getattr(self, config) is not None and existing.get(config, None) != getattr(self, config, None):
self.log("redis_configuration {0} diff: origin {1} / update {2}".format(config, existing.get(config, None), getattr(self, config, None)))
return True
return False
def create_rediscache(self):
'''
Creates Azure Cache for Redis instance with the specified configuration.
:return: deserialized Azure Cache for Redis instance state dictionary
'''
self.log(
"Creating Azure Cache for Redis instance {0}".format(self.name))
try:
redis_config = dict()
for key in self.redis_configuration_properties:
if getattr(self, key, None):
redis_config[underline_to_hyphen(key)] = underline_to_hyphen(getattr(self, key))
params = RedisCreateParameters(
location=self.location,
sku=Sku(self.sku['name'].title(), self.sku['size'][0], self.sku['size'][1:]),
tags=self.tags,
redis_configuration=redis_config,
enable_non_ssl_port=self.enable_non_ssl_port,
tenant_settings=self.tenant_settings,
shard_count=self.shard_count,
subnet_id=self.subnet,
static_ip=self.static_ip
)
response = self._client.redis.create(resource_group_name=self.resource_group,
name=self.name,
parameters=params)
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
if self.wait_for_provisioning:
self.wait_for_redis_running()
except CloudError as exc:
self.log('Error attempting to create the Azure Cache for Redis instance.')
self.fail(
"Error creating the Azure Cache for Redis instance: {0}".format(str(exc)))
return rediscache_to_dict(response)
def update_rediscache(self):
'''
Updates Azure Cache for Redis instance with the specified configuration.
:return: Azure Cache for Redis instance state dictionary
'''
self.log(
"Updating Azure Cache for Redis instance {0}".format(self.name))
try:
redis_config = dict()
for key in self.redis_configuration_properties:
if getattr(self, key, None):
redis_config[underline_to_hyphen(key)] = underline_to_hyphen(getattr(self, key))
params = RedisUpdateParameters(
redis_configuration=redis_config,
enable_non_ssl_port=self.enable_non_ssl_port,
tenant_settings=self.tenant_settings,
shard_count=self.shard_count,
sku=Sku(self.sku['name'].title(), self.sku['size'][0], self.sku['size'][1:]),
tags=self.tags
)
response = self._client.redis.update(resource_group_name=self.resource_group,
name=self.name,
parameters=params)
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
if self.wait_for_provisioning:
self.wait_for_redis_running()
except CloudError as exc:
self.log('Error attempting to update the Azure Cache for Redis instance.')
self.fail(
"Error updating the Azure Cache for Redis instance: {0}".format(str(exc)))
return rediscache_to_dict(response)
def delete_rediscache(self):
'''
Deletes specified Azure Cache for Redis instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Azure Cache for Redis instance {0}".format(self.name))
try:
response = self._client.redis.delete(resource_group_name=self.resource_group,
name=self.name)
except CloudError as e:
self.log('Error attempting to delete the Azure Cache for Redis instance.')
self.fail(
"Error deleting the Azure Cache for Redis instance: {0}".format(str(e)))
return True
def get_rediscache(self):
'''
Gets the properties of the specified Azure Cache for Redis instance.
:return: Azure Cache for Redis instance state dictionary
'''
self.log("Checking if the Azure Cache for Redis instance {0} is present".format(self.name))
response = None
try:
response = self._client.redis.get(resource_group_name=self.resource_group,
name=self.name)
self.log("Response : {0}".format(response))
self.log("Azure Cache for Redis instance : {0} found".format(response.name))
return rediscache_to_dict(response)
except CloudError as ex:
self.log("Didn't find Azure Cache for Redis {0} in resource group {1}".format(
self.name, self.resource_group))
return False
def force_reboot_rediscache(self):
'''
Force reboot specified redis cache instance in the specified subscription and resource group.
:return: True
'''
self.log("Force reboot the redis cache instance {0}".format(self.name))
try:
response = self._client.redis.force_reboot(resource_group_name=self.resource_group,
name=self.name,
reboot_type=self.reboot['reboot_type'],
shard_id=self.reboot.get('shard_id'))
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
if self.wait_for_provisioning:
self.wait_for_redis_running()
except CloudError as e:
self.log('Error attempting to force reboot the redis cache instance.')
self.fail(
"Error force rebooting the redis cache instance: {0}".format(str(e)))
return True
    def regenerate_rediscache_key(self):
'''
Regenerate key of redis cache instance in the specified subscription and resource group.
:return: True
'''
self.log("Regenerate key of redis cache instance {0}".format(self.name))
try:
response = self._client.redis.regenerate_key(resource_group_name=self.resource_group,
name=self.name,
key_type=self.regenerate_key['key_type'].title())
return response.to_dict()
except CloudError as e:
self.log('Error attempting to regenerate key of redis cache instance.')
self.fail(
"Error regenerate key of redis cache instance: {0}".format(str(e)))
def get_subnet(self):
'''
Gets the properties of the specified subnet.
:return: subnet id
'''
self.log("Checking if the subnet {0} is present".format(self.name))
response = None
try:
response = self.network_client.subnets.get(self.subnet['resource_group'],
self.subnet['virtual_network_name'],
self.subnet['name'])
self.log("Subnet found : {0}".format(response))
return response.id
except CloudError as ex:
self.log("Didn't find subnet {0} in resource group {1}".format(
self.subnet['name'], self.subnet['resource_group']))
return False
def parse_subnet(self):
if isinstance(self.subnet, dict):
if 'virtual_network_name' not in self.subnet or \
'name' not in self.subnet:
self.fail("Subnet dict must contains virtual_network_name and name")
if 'resource_group' not in self.subnet:
self.subnet['resource_group'] = self.resource_group
subnet_id = self.get_subnet()
else:
subnet_id = self.subnet
return subnet_id
def wait_for_redis_running(self):
try:
response = self._client.redis.get(resource_group_name=self.resource_group, name=self.name)
status = response.provisioning_state
polling_times = 0
while polling_times < self.wait_for_provisioning_polling_times:
if status.lower() != "succeeded":
polling_times += 1
time.sleep(self.wait_for_provisioning_polling_interval_in_seconds)
response = self._client.redis.get(resource_group_name=self.resource_group, name=self.name)
status = response.provisioning_state
else:
return True
self.fail("Azure Cache for Redis is not running after 60 mins.")
except CloudError as e:
self.fail("Failed to get Azure Cache for Redis: {0}".format(str(e)))
def main():
"""Main execution"""
AzureRMRedisCaches()
if __name__ == '__main__':
main()
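# Sanity sketch of the small pure helpers above (no Azure access required):
#     hyphen_to_underline('maxmemory-policy')   -> 'maxmemory_policy'
#     underline_to_hyphen('maxmemory_policy')   -> 'maxmemory-policy'
#     get_reboot_type('primary')                -> 'PrimaryNode'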
|
jiangzhuo/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_importlib/source/test_source_encoding.py
|
81
|
from .. import util
from . import util as source_util
machinery = util.import_importlib('importlib.machinery')
import codecs
import importlib.util
import re
import sys
import types
# Because sys.path gets essentially blanked, need to have unicodedata already
# imported for the parser to use.
import unicodedata
import unittest
import warnings
CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
class EncodingTest:
"""PEP 3120 makes UTF-8 the default encoding for source code
[default encoding].
    PEP 263 specifies how that can change on a per-file basis. Either the first
    or second line can contain the encoding line [encoding first line]
    [encoding second line]. If the file has the BOM marker it is considered UTF-8
    implicitly [BOM]. If the BOM is present, any explicit encoding must be UTF-8,
    else it is an error [BOM and utf-8][BOM conflict].
"""
variable = '\u00fc'
character = '\u00c9'
source_line = "{0} = '{1}'\n".format(variable, character)
module_name = '_temp'
def run_test(self, source):
with source_util.create_modules(self.module_name) as mapping:
with open(mapping[self.module_name], 'wb') as file:
file.write(source)
loader = self.machinery.SourceFileLoader(self.module_name,
mapping[self.module_name])
return self.load(loader)
def create_source(self, encoding):
encoding_line = "# coding={0}".format(encoding)
assert CODING_RE.match(encoding_line)
source_lines = [encoding_line.encode('utf-8')]
source_lines.append(self.source_line.encode(encoding))
return b'\n'.join(source_lines)
def test_non_obvious_encoding(self):
# Make sure that an encoding that has never been a standard one for
# Python works.
encoding_line = "# coding=koi8-r"
assert CODING_RE.match(encoding_line)
source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
self.run_test(source)
# [default encoding]
def test_default_encoding(self):
self.run_test(self.source_line.encode('utf-8'))
# [encoding first line]
def test_encoding_on_first_line(self):
encoding = 'Latin-1'
source = self.create_source(encoding)
self.run_test(source)
# [encoding second line]
def test_encoding_on_second_line(self):
source = b"#/usr/bin/python\n" + self.create_source('Latin-1')
self.run_test(source)
# [BOM]
def test_bom(self):
self.run_test(codecs.BOM_UTF8 + self.source_line.encode('utf-8'))
# [BOM and utf-8]
def test_bom_and_utf_8(self):
source = codecs.BOM_UTF8 + self.create_source('utf-8')
self.run_test(source)
# [BOM conflict]
def test_bom_conflict(self):
source = codecs.BOM_UTF8 + self.create_source('latin-1')
with self.assertRaises(SyntaxError):
self.run_test(source)
class EncodingTestPEP451(EncodingTest):
def load(self, loader):
module = types.ModuleType(self.module_name)
module.__spec__ = importlib.util.spec_from_loader(self.module_name, loader)
loader.exec_module(module)
return module
Frozen_EncodingTestPEP451, Source_EncodingTestPEP451 = util.test_both(
EncodingTestPEP451, machinery=machinery)
class EncodingTestPEP302(EncodingTest):
def load(self, loader):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return loader.load_module(self.module_name)
Frozen_EncodingTestPEP302, Source_EncodingTestPEP302 = util.test_both(
EncodingTestPEP302, machinery=machinery)
class LineEndingTest:
r"""Source written with the three types of line endings (\n, \r\n, \r)
need to be readable [cr][crlf][lf]."""
def run_test(self, line_ending):
module_name = '_temp'
source_lines = [b"a = 42", b"b = -13", b'']
source = line_ending.join(source_lines)
with source_util.create_modules(module_name) as mapping:
with open(mapping[module_name], 'wb') as file:
file.write(source)
loader = self.machinery.SourceFileLoader(module_name,
mapping[module_name])
return self.load(loader, module_name)
# [cr]
def test_cr(self):
self.run_test(b'\r')
# [crlf]
def test_crlf(self):
self.run_test(b'\r\n')
# [lf]
def test_lf(self):
self.run_test(b'\n')
class LineEndingTestPEP451(LineEndingTest):
def load(self, loader, module_name):
module = types.ModuleType(module_name)
module.__spec__ = importlib.util.spec_from_loader(module_name, loader)
loader.exec_module(module)
return module
Frozen_LineEndingTestPEP451, Source_LineEndingTestPEP451 = util.test_both(
LineEndingTestPEP451, machinery=machinery)
class LineEndingTestPEP302(LineEndingTest):
def load(self, loader, module_name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return loader.load_module(module_name)
Frozen_LineEndingTestPEP302, Source_LineEndingTestPEP302 = util.test_both(
LineEndingTestPEP302, machinery=machinery)
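# A small, self-contained illustration of the CODING_RE pattern used by
# EncodingTest above: it recognizes a PEP 263 declaration on a comment line
# and captures the codec name. Guarded so it does not affect test discovery.
if __name__ == '__main__':
    assert CODING_RE.match("# -*- coding: koi8-r -*-").group(1) == "koi8-r"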
if __name__ == '__main__':
unittest.main()
|
pombredanne/cpe
|
refs/heads/master
|
docs/conf.py
|
2
|
# -*- coding: utf-8 -*-
#
# CPE documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 26 01:11:34 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
BASE_DIR = os.path.join(os.path.dirname(__file__), "..")
PKG_DIR = os.path.join(BASE_DIR, "cpe")
meta = {}
with open(os.path.join(PKG_DIR, "__meta__.py"), 'rb') as f:
exec(f.read(), meta)
# Project constants
PROJECT_NAME = meta['__packagename__'].upper()
COPYRIGHT = u'2013, {0}'.format(meta['__author__'])
TITLE_SUFFIX = u'Documentation'
SOURCE_START_FILE = u'index'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
# Include documentation from docstrings
'sphinx.ext.autodoc',
# Execute tests in docstrings
'sphinx.ext.doctest',
# Link to other projects' documentation
'sphinx.ext.intersphinx',
# Support for todo items
'sphinx.ext.todo',
# Collect doc coverage stats
'sphinx.ext.coverage',
# Add links to highlighted source code
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of all reST source files.
# The recommended encoding, and the default value, is 'utf-8-sig'.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = SOURCE_START_FILE
# General information about the project.
project = PROJECT_NAME
copyright = COPYRIGHT
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = meta['__version__']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u"{0} v{1} {2}".format(project, release, TITLE_SUFFIX)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CPEdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(SOURCE_START_FILE, 'CPE.tex', html_title, meta['__author__'], 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(SOURCE_START_FILE, PROJECT_NAME, html_title, [meta['__author__']], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(SOURCE_START_FILE, PROJECT_NAME, html_title, meta['__author__'],
PROJECT_NAME, meta['__summary__'], 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
Tejal011089/osmosis_erpnext
|
refs/heads/develop
|
erpnext/buying/report/purchase_order_trends/__init__.py
|
12133432
| |
mushtaqak/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/tests/views/__init__.py
|
12133432
| |
scotthartbti/android_external_chromium_org
|
refs/heads/kk44
|
tools/telemetry/telemetry/page/page_measurement_unittest_base.py
|
24
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.page import page_runner
from telemetry.page import page as page_module
from telemetry.page import page_set
from telemetry.page import test_expectations
from telemetry.unittest import options_for_unittests
class PageMeasurementUnitTestBase(unittest.TestCase):
"""unittest.TestCase-derived class to help in the construction of unit tests
for a measurement."""
def CreatePageSetFromFileInUnittestDataDir(self, test_filename):
return self.CreatePageSet('file://' + test_filename)
def CreatePageSet(self, test_filename):
base_dir = util.GetUnittestDataDir()
ps = page_set.PageSet(file_path=base_dir)
page = page_module.Page(test_filename, ps, base_dir=base_dir)
setattr(page, 'smoothness', {'action': 'scroll'})
ps.pages.append(page)
return ps
def RunMeasurement(self, measurement, ps,
expectations=test_expectations.TestExpectations(),
options=None):
"""Runs a measurement against a pageset, returning the rows its outputs."""
if options is None:
options = options_for_unittests.GetCopy()
assert options
temp_parser = options.CreateParser()
measurement.AddCommandLineOptions(temp_parser)
defaults = temp_parser.get_default_values()
for k, v in defaults.__dict__.items():
if hasattr(options, k):
continue
setattr(options, k, v)
measurement.CustomizeBrowserOptions(options)
options.output_file = None
options.output_format = 'none'
options.output_trace_tag = None
return page_runner.Run(measurement, ps, expectations, options)
|
ssorgatem/pulsar
|
refs/heads/master
|
test/manager_drmaa_test.py
|
4
|
from .test_utils import (
BaseManagerTestCase,
skip_unless_module
)
from pulsar.managers.queued_drmaa import DrmaaQueueManager
class DrmaaManagerTest(BaseManagerTestCase):
def setUp(self):
super(DrmaaManagerTest, self).setUp()
self._set_manager()
def tearDown(self):
        super(DrmaaManagerTest, self).tearDown()
self.manager.shutdown()
def _set_manager(self, **kwds):
self.manager = DrmaaQueueManager('_default_', self.app, **kwds)
@skip_unless_module("drmaa")
def test_simple_execution(self):
self._test_simple_execution(self.manager)
@skip_unless_module("drmaa")
def test_cancel(self):
self._test_cancelling(self.manager)
|
blakfeld/ansible-modules-extras
|
refs/heads/devel
|
packaging/os/homebrew_tap.py
|
66
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
DOCUMENTATION = '''
---
module: homebrew_tap
author: "Daniel Jaouen (@danieljaouen)"
short_description: Tap a Homebrew repository.
description:
- Tap external Homebrew repositories.
version_added: "1.6"
options:
tap:
description:
- The repository to tap.
required: true
state:
description:
- state of the repository.
choices: [ 'present', 'absent' ]
required: false
default: 'present'
requirements: [ homebrew ]
'''
EXAMPLES = '''
homebrew_tap: tap=homebrew/dupes state=present
homebrew_tap: tap=homebrew/dupes state=absent
homebrew_tap: tap=homebrew/dupes,homebrew/science state=present
'''
def a_valid_tap(tap):
'''Returns True if the tap is valid.'''
regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
return regex.match(tap)
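# Illustrative sketch (comments only, not part of the upstream module): the
# regex above accepts "user/repo" names, with an optional "homebrew-" prefix on
# the repository part. One would expect, for example:
#
#   a_valid_tap('homebrew/dupes')             # match object (truthy)
#   a_valid_tap('homebrew/homebrew-science')  # match object (truthy)
#   a_valid_tap('not a tap')                  # None (falsy)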
def already_tapped(module, brew_path, tap):
'''Returns True if already tapped.'''
rc, out, err = module.run_command([
brew_path,
'tap',
])
taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
return tap.lower() in taps
def add_tap(module, brew_path, tap):
'''Adds a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif not already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'tap',
tap,
])
if already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully tapped: %s' % tap
else:
failed = True
msg = 'failed to tap: %s' % tap
else:
msg = 'already tapped: %s' % tap
return (failed, changed, msg)
def add_taps(module, brew_path, taps):
'''Adds one or more taps.'''
failed, unchanged, added, msg = False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = add_tap(module, brew_path, tap)
if failed:
break
if changed:
added += 1
else:
unchanged += 1
if failed:
msg = 'added: %d, unchanged: %d, error: ' + msg
msg = msg % (added, unchanged)
elif added:
changed = True
msg = 'added: %d, unchanged: %d' % (added, unchanged)
else:
msg = 'added: %d, unchanged: %d' % (added, unchanged)
return (failed, changed, msg)
def remove_tap(module, brew_path, tap):
'''Removes a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'untap',
tap,
])
if not already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully untapped: %s' % tap
else:
failed = True
msg = 'failed to untap: %s' % tap
else:
msg = 'already untapped: %s' % tap
return (failed, changed, msg)
def remove_taps(module, brew_path, taps):
'''Removes one or more taps.'''
failed, unchanged, removed, msg = False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = remove_tap(module, brew_path, tap)
if failed:
break
if changed:
removed += 1
else:
unchanged += 1
if failed:
msg = 'removed: %d, unchanged: %d, error: ' + msg
msg = msg % (removed, unchanged)
elif removed:
changed = True
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
else:
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
return (failed, changed, msg)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['tap'], required=True),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
brew_path = module.get_bin_path(
'brew',
required=True,
opt_dirs=['/usr/local/bin']
)
taps = module.params['name'].split(',')
if module.params['state'] == 'present':
failed, changed, msg = add_taps(module, brew_path, taps)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
elif module.params['state'] == 'absent':
failed, changed, msg = remove_taps(module, brew_path, taps)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
xujb/odoo
|
refs/heads/8.0
|
addons/payment_adyen/models/adyen.py
|
165
|
# -*- coding: utf-'8' "-*-"
import base64
try:
import simplejson as json
except ImportError:
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
_inherit = 'payment.acquirer'
def _get_adyen_urls(self, cr, uid, environment, context=None):
""" Adyen URLs
- yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
"""
return {
'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
providers.append(['adyen', 'Adyen'])
return providers
_columns = {
'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
}
def _adyen_generate_merchant_sig(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
        :param browse acquirer: the payment.acquirer browse record. It should
                                have the skin HMAC key configured
        :param string inout: 'in' (openerp contacting adyen) or 'out' (adyen
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'adyen'
if inout == 'in':
keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
else:
keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
key = acquirer.adyen_skin_hmac_key.encode('ascii')
return base64.b64encode(hmac.new(key, sign, sha1).digest())
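    # Minimal standalone sketch (comments only) of the signing scheme used above,
    # with hypothetical placeholder values; in practice the key comes from
    # acquirer.adyen_skin_hmac_key and the payload from the transaction fields:
    #
    #   import base64, hmac
    #   from hashlib import sha1
    #   payload = ('1000' + 'EUR').encode('ascii')    # concatenated field values, in key order
    #   key = 'dummy_skin_hmac_key'.encode('ascii')
    #   merchant_sig = base64.b64encode(hmac.new(key, payload, sha1).digest())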
def adyen_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
# tmp
import datetime
from dateutil import relativedelta
tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
adyen_tx_values = dict(tx_values)
adyen_tx_values.update({
'merchantReference': tx_values['reference'],
'paymentAmount': '%d' % int(float_round(tx_values['amount'], 2) * 100),
'currencyCode': tx_values['currency'] and tx_values['currency'].name or '',
'shipBeforeDate': tmp_date,
'skinCode': acquirer.adyen_skin_code,
'merchantAccount': acquirer.adyen_merchant_account,
'shopperLocale': partner_values['lang'],
'sessionValidity': tmp_date,
'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
})
if adyen_tx_values.get('return_url'):
adyen_tx_values['merchantReturnData'] = json.dumps({'return_url': '%s' % adyen_tx_values.pop('return_url')})
adyen_tx_values['merchantSig'] = self._adyen_generate_merchant_sig(acquirer, 'in', adyen_tx_values)
return partner_values, adyen_tx_values
def adyen_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
_inherit = 'payment.transaction'
_columns = {
'adyen_psp_reference': fields.char('Adyen PSP Reference'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, pspReference = data.get('merchantReference'), data.get('pspReference')
if not reference or not pspReference:
error_msg = 'Adyen: received data with missing reference (%s) or missing pspReference (%s)' % (reference, pspReference)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use pspReference ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Adyen: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
                error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
if shasign_check != data.get('merchantSig'):
error_msg = 'Adyen: invalid merchantSig, received %s, computed %s' % (data.get('merchantSig'), shasign_check)
_logger.warning(error_msg)
raise ValidationError(error_msg)
return tx
def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# reference at acquirer: pspReference
if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
# seller
if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
# result
if not data.get('authResult'):
invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
return invalid_parameters
def _adyen_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('authResult', 'PENDING')
if status == 'AUTHORISED':
tx.write({
'state': 'done',
'adyen_psp_reference': data.get('pspReference'),
# 'date_validate': data.get('payment_date', fields.datetime.now()),
# 'paypal_txn_type': data.get('express_checkout')
})
return True
elif status == 'PENDING':
tx.write({
'state': 'pending',
'adyen_psp_reference': data.get('pspReference'),
})
return True
else:
error = 'Adyen: feedback error'
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error
})
return False
|
Danath/gyp
|
refs/heads/master
|
test/mac/gyptest-loadable-module.py
|
34
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests that a loadable_module target is built correctly.
"""
import TestGyp
import os
import struct
import sys
if sys.platform == 'darwin':
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'loadable-module'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Binary.
binary = test.built_file_path(
'test_loadable_module.plugin/Contents/MacOS/test_loadable_module',
chdir=CHDIR)
test.must_exist(binary)
MH_BUNDLE = 8
if struct.unpack('4I', open(binary, 'rb').read(16))[3] != MH_BUNDLE:
test.fail_test()
# Info.plist.
info_plist = test.built_file_path(
'test_loadable_module.plugin/Contents/Info.plist', chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, """
<key>CFBundleExecutable</key>
<string>test_loadable_module</string>
""")
# PkgInfo.
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/PkgInfo', chdir=CHDIR)
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/Resources', chdir=CHDIR)
test.pass_test()
|
jarshwah/django
|
refs/heads/master
|
tests/i18n/test_compilation.py
|
16
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gettext as gettext_module
import os
import stat
import unittest
from subprocess import Popen
from django.core.management import (
CommandError, call_command, execute_from_command_line,
)
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six, translation
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import ugettext
from .utils import RunInTmpDirMixin, copytree
has_msgfmt = find_command('msgfmt')
@unittest.skipUnless(has_msgfmt, 'msgfmt is mandatory for compilation tests')
class MessageCompilationTests(RunInTmpDirMixin, SimpleTestCase):
work_subdir = 'commands'
class PoFileTests(MessageCompilationTests):
LOCALE = 'es_AR'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_bom_rejection(self):
with self.assertRaises(CommandError) as cm:
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertIn("file has a BOM (Byte Order Mark)", cm.exception.args[0])
self.assertFalse(os.path.exists(self.MO_FILE))
def test_no_write_access(self):
mo_file_en = 'locale/en/LC_MESSAGES/django.mo'
err_buffer = StringIO()
# put file in read-only mode
old_mode = os.stat(mo_file_en).st_mode
os.chmod(mo_file_en, stat.S_IREAD)
try:
call_command('compilemessages', locale=['en'], stderr=err_buffer, verbosity=0)
err = err_buffer.getvalue()
self.assertIn("not writable location", force_text(err))
finally:
os.chmod(mo_file_en, old_mode)
class PoFileContentsTests(MessageCompilationTests):
# Ticket #11240
LOCALE = 'fr'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_percent_symbol_in_po_file(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE))
class MultipleLocaleCompilationTests(MessageCompilationTests):
MO_FILE_HR = None
MO_FILE_FR = None
def setUp(self):
super(MultipleLocaleCompilationTests, self).setUp()
localedir = os.path.join(self.test_dir, 'locale')
self.MO_FILE_HR = os.path.join(localedir, 'hr/LC_MESSAGES/django.mo')
self.MO_FILE_FR = os.path.join(localedir, 'fr/LC_MESSAGES/django.mo')
def test_one_locale(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=['hr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
def test_multiple_locales(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=['hr', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
self.assertTrue(os.path.exists(self.MO_FILE_FR))
class ExcludedLocaleCompilationTests(MessageCompilationTests):
work_subdir = 'exclude'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo'
def setUp(self):
super(ExcludedLocaleCompilationTests, self).setUp()
copytree('canned_locale', 'locale')
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'compilemessages'])
def test_one_locale_excluded(self):
call_command('compilemessages', exclude=['it'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertTrue(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded(self):
call_command('compilemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_one_locale_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
class CompilationErrorHandling(MessageCompilationTests):
def test_error_reported_by_msgfmt(self):
# po file contains wrong po formatting.
with self.assertRaises(CommandError):
call_command('compilemessages', locale=['ja'], verbosity=0)
def test_msgfmt_error_including_non_ascii(self):
# po file contains invalid msgstr content (triggers non-ascii error content).
# Make sure the output of msgfmt is unaffected by the current locale.
env = os.environ.copy()
env.update({str('LANG'): str('C')})
with mock.patch('django.core.management.utils.Popen', lambda *args, **kwargs: Popen(*args, env=env, **kwargs)):
if six.PY2:
# Various assertRaises on PY2 don't support unicode error messages.
try:
call_command('compilemessages', locale=['ko'], verbosity=0)
except CommandError as err:
self.assertIn("' cannot start a field name", six.text_type(err))
else:
cmd = MakeMessagesCommand()
if cmd.gettext_version < (0, 18, 3):
self.skipTest("python-brace-format is a recent gettext addition.")
with self.assertRaisesMessage(CommandError, "' cannot start a field name"):
call_command('compilemessages', locale=['ko'], verbosity=0)
class ProjectAndAppTests(MessageCompilationTests):
LOCALE = 'ru'
PROJECT_MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
APP_MO_FILE = 'app_with_locale/locale/%s/LC_MESSAGES/django.mo' % LOCALE
class FuzzyTranslationTest(ProjectAndAppTests):
def setUp(self):
super(FuzzyTranslationTest, self).setUp()
gettext_module._translations = {} # flush cache or test will be useless
def test_nofuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(ugettext('Lenin'), force_text('Ленин'))
self.assertEqual(ugettext('Vodka'), force_text('Vodka'))
def test_fuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=[self.LOCALE], fuzzy=True, stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(ugettext('Lenin'), force_text('Ленин'))
self.assertEqual(ugettext('Vodka'), force_text('Водка'))
class AppCompilationTest(ProjectAndAppTests):
def test_app_locale_compiled(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.PROJECT_MO_FILE))
self.assertTrue(os.path.exists(self.APP_MO_FILE))
|
jacek99/corepost
|
refs/heads/master
|
corepost/test/home_resource.py
|
1
|
'''
Server tests
@author: jacekf
'''
from corepost.web import RESTResource, route
from corepost.enums import Http, MediaType, HttpHeader
from twisted.internet import defer
from xml.etree import ElementTree
import json, yaml
class HomeApp():
def __init__(self,*args,**kwargs):
self.issue1 = "issue 1"
@route("/",Http.GET)
@defer.inlineCallbacks
def root(self,request,**kwargs):
yield 1
request.write("%s" % kwargs)
request.finish()
@route("/test",Http.GET)
def test(self,request,**kwargs):
return "%s" % kwargs
@route("/test/<int:numericid>/resource/<stringid>",Http.GET)
def test_get_resources(self,request,numericid,stringid,**kwargs):
return "%s - %s" % (numericid,stringid)
@route("/post",(Http.POST,Http.PUT))
def test_post(self,request,**kwargs):
return "%s" % kwargs
@route("/put",(Http.POST,Http.PUT))
def test_put(self,request,**kwargs):
return "%s" % kwargs
@route("/postput",(Http.POST,Http.PUT))
def test_postput(self,request,**kwargs):
return "%s" % kwargs
@route("/delete",Http.DELETE)
def test_delete(self,request,**kwargs):
return "%s" % kwargs
@route("/post/json",(Http.POST,Http.PUT))
def test_json(self,request,**kwargs):
return "%s" % json.dumps(request.json)
@route("/post/xml",(Http.POST,Http.PUT))
def test_xml(self,request,**kwargs):
return "%s" % ElementTree.tostring(request.xml)
@route("/post/yaml",(Http.POST,Http.PUT))
def test_yaml(self,request,**kwargs):
return "%s" % yaml.dump(request.yaml,indent=4,width=130,default_flow_style=False)
##################################################################
# same URLs, routed by incoming content type
###################################################################
@route("/post/by/content",(Http.POST,Http.PUT),MediaType.APPLICATION_JSON)
def test_content_app_json(self,request,**kwargs):
return request.received_headers[HttpHeader.CONTENT_TYPE]
@route("/post/by/content",(Http.POST,Http.PUT),(MediaType.TEXT_XML,MediaType.APPLICATION_XML))
def test_content_xml(self,request,**kwargs):
return request.received_headers[HttpHeader.CONTENT_TYPE]
@route("/post/by/content",(Http.POST,Http.PUT),MediaType.TEXT_YAML)
def test_content_yaml(self,request,**kwargs):
return request.received_headers[HttpHeader.CONTENT_TYPE]
@route("/post/by/content",(Http.POST,Http.PUT))
def test_content_catch_all(self,request,**kwargs):
return MediaType.WILDCARD
##################################################################
# one URL, serving different content types
###################################################################
@route("/return/by/accept")
def test_return_content_by_accepts(self,request,**kwargs):
val = [{"test1":"Test1"},{"test2":"Test2"}]
return val
@route("/return/by/accept/deferred")
@defer.inlineCallbacks
def test_return_content_by_accept_deferred(self,request,**kwargs):
"""Ensure support for inline callbacks and deferred"""
val = yield [{"test1":"Test1"},{"test2":"Test2"}]
defer.returnValue(val)
@route("/return/by/accept/class")
def test_return_class_content_by_accepts(self,request,**kwargs):
"""Uses Python class instead of dict/list"""
class TestReturn:
"""Test return class"""
def __init__(self):
self.__t1 = 'Test'
t1 = TestReturn()
t1.test1 = 'Test1'
t2 = TestReturn()
t2.test2="Test2"
return (t1,t2)
####################################
# Issues
####################################
@route("/issues/1")
def test_issue_1(self,request,**kwargs):
return self.issue1
####################################
# extra HTTP methods
####################################
@route("/methods/head",Http.HEAD)
def test_head_http(self,request,**kwargs):
return ""
@route("/methods/options",Http.OPTIONS)
def test_options_http(self,request,**kwargs):
return "OPTIONS"
@route("/methods/patch",Http.PATCH)
def test_patch_http(self,request,**kwargs):
return "PATCH=%s" % kwargs
def run_app_home():
app = RESTResource((HomeApp(),))
app.run()
if __name__ == "__main__":
run_app_home()
|
public-ink/public-ink
|
refs/heads/master
|
server/appengine-staging/lib/unidecode/x07b.py
|
252
|
data = (
'Mang ', # 0x00
'Zhu ', # 0x01
'Utsubo ', # 0x02
'Du ', # 0x03
'Ji ', # 0x04
'Xiao ', # 0x05
'Ba ', # 0x06
'Suan ', # 0x07
'Ji ', # 0x08
'Zhen ', # 0x09
'Zhao ', # 0x0a
'Sun ', # 0x0b
'Ya ', # 0x0c
'Zhui ', # 0x0d
'Yuan ', # 0x0e
'Hu ', # 0x0f
'Gang ', # 0x10
'Xiao ', # 0x11
'Cen ', # 0x12
'Pi ', # 0x13
'Bi ', # 0x14
'Jian ', # 0x15
'Yi ', # 0x16
'Dong ', # 0x17
'Shan ', # 0x18
'Sheng ', # 0x19
'Xia ', # 0x1a
'Di ', # 0x1b
'Zhu ', # 0x1c
'Na ', # 0x1d
'Chi ', # 0x1e
'Gu ', # 0x1f
'Li ', # 0x20
'Qie ', # 0x21
'Min ', # 0x22
'Bao ', # 0x23
'Tiao ', # 0x24
'Si ', # 0x25
'Fu ', # 0x26
'Ce ', # 0x27
'Ben ', # 0x28
'Pei ', # 0x29
'Da ', # 0x2a
'Zi ', # 0x2b
'Di ', # 0x2c
'Ling ', # 0x2d
'Ze ', # 0x2e
'Nu ', # 0x2f
'Fu ', # 0x30
'Gou ', # 0x31
'Fan ', # 0x32
'Jia ', # 0x33
'Ge ', # 0x34
'Fan ', # 0x35
'Shi ', # 0x36
'Mao ', # 0x37
'Po ', # 0x38
'Sey ', # 0x39
'Jian ', # 0x3a
'Qiong ', # 0x3b
'Long ', # 0x3c
'Souke ', # 0x3d
'Bian ', # 0x3e
'Luo ', # 0x3f
'Gui ', # 0x40
'Qu ', # 0x41
'Chi ', # 0x42
'Yin ', # 0x43
'Yao ', # 0x44
'Xian ', # 0x45
'Bi ', # 0x46
'Qiong ', # 0x47
'Gua ', # 0x48
'Deng ', # 0x49
'Jiao ', # 0x4a
'Jin ', # 0x4b
'Quan ', # 0x4c
'Sun ', # 0x4d
'Ru ', # 0x4e
'Fa ', # 0x4f
'Kuang ', # 0x50
'Zhu ', # 0x51
'Tong ', # 0x52
'Ji ', # 0x53
'Da ', # 0x54
'Xing ', # 0x55
'Ce ', # 0x56
'Zhong ', # 0x57
'Kou ', # 0x58
'Lai ', # 0x59
'Bi ', # 0x5a
'Shai ', # 0x5b
'Dang ', # 0x5c
'Zheng ', # 0x5d
'Ce ', # 0x5e
'Fu ', # 0x5f
'Yun ', # 0x60
'Tu ', # 0x61
'Pa ', # 0x62
'Li ', # 0x63
'Lang ', # 0x64
'Ju ', # 0x65
'Guan ', # 0x66
'Jian ', # 0x67
'Han ', # 0x68
'Tong ', # 0x69
'Xia ', # 0x6a
'Zhi ', # 0x6b
'Cheng ', # 0x6c
'Suan ', # 0x6d
'Shi ', # 0x6e
'Zhu ', # 0x6f
'Zuo ', # 0x70
'Xiao ', # 0x71
'Shao ', # 0x72
'Ting ', # 0x73
'Ce ', # 0x74
'Yan ', # 0x75
'Gao ', # 0x76
'Kuai ', # 0x77
'Gan ', # 0x78
'Chou ', # 0x79
'Kago ', # 0x7a
'Gang ', # 0x7b
'Yun ', # 0x7c
'O ', # 0x7d
'Qian ', # 0x7e
'Xiao ', # 0x7f
'Jian ', # 0x80
'Pu ', # 0x81
'Lai ', # 0x82
'Zou ', # 0x83
'Bi ', # 0x84
'Bi ', # 0x85
'Bi ', # 0x86
'Ge ', # 0x87
'Chi ', # 0x88
'Guai ', # 0x89
'Yu ', # 0x8a
'Jian ', # 0x8b
'Zhao ', # 0x8c
'Gu ', # 0x8d
'Chi ', # 0x8e
'Zheng ', # 0x8f
'Jing ', # 0x90
'Sha ', # 0x91
'Zhou ', # 0x92
'Lu ', # 0x93
'Bo ', # 0x94
'Ji ', # 0x95
'Lin ', # 0x96
'Suan ', # 0x97
'Jun ', # 0x98
'Fu ', # 0x99
'Zha ', # 0x9a
'Gu ', # 0x9b
'Kong ', # 0x9c
'Qian ', # 0x9d
'Quan ', # 0x9e
'Jun ', # 0x9f
'Chui ', # 0xa0
'Guan ', # 0xa1
'Yuan ', # 0xa2
'Ce ', # 0xa3
'Ju ', # 0xa4
'Bo ', # 0xa5
'Ze ', # 0xa6
'Qie ', # 0xa7
'Tuo ', # 0xa8
'Luo ', # 0xa9
'Dan ', # 0xaa
'Xiao ', # 0xab
'Ruo ', # 0xac
'Jian ', # 0xad
'Xuan ', # 0xae
'Bian ', # 0xaf
'Sun ', # 0xb0
'Xiang ', # 0xb1
'Xian ', # 0xb2
'Ping ', # 0xb3
'Zhen ', # 0xb4
'Sheng ', # 0xb5
'Hu ', # 0xb6
'Shi ', # 0xb7
'Zhu ', # 0xb8
'Yue ', # 0xb9
'Chun ', # 0xba
'Lu ', # 0xbb
'Wu ', # 0xbc
'Dong ', # 0xbd
'Xiao ', # 0xbe
'Ji ', # 0xbf
'Jie ', # 0xc0
'Huang ', # 0xc1
'Xing ', # 0xc2
'Mei ', # 0xc3
'Fan ', # 0xc4
'Chui ', # 0xc5
'Zhuan ', # 0xc6
'Pian ', # 0xc7
'Feng ', # 0xc8
'Zhu ', # 0xc9
'Hong ', # 0xca
'Qie ', # 0xcb
'Hou ', # 0xcc
'Qiu ', # 0xcd
'Miao ', # 0xce
'Qian ', # 0xcf
'[?] ', # 0xd0
'Kui ', # 0xd1
'Sik ', # 0xd2
'Lou ', # 0xd3
'Yun ', # 0xd4
'He ', # 0xd5
'Tang ', # 0xd6
'Yue ', # 0xd7
'Chou ', # 0xd8
'Gao ', # 0xd9
'Fei ', # 0xda
'Ruo ', # 0xdb
'Zheng ', # 0xdc
'Gou ', # 0xdd
'Nie ', # 0xde
'Qian ', # 0xdf
'Xiao ', # 0xe0
'Cuan ', # 0xe1
'Gong ', # 0xe2
'Pang ', # 0xe3
'Du ', # 0xe4
'Li ', # 0xe5
'Bi ', # 0xe6
'Zhuo ', # 0xe7
'Chu ', # 0xe8
'Shai ', # 0xe9
'Chi ', # 0xea
'Zhu ', # 0xeb
'Qiang ', # 0xec
'Long ', # 0xed
'Lan ', # 0xee
'Jian ', # 0xef
'Bu ', # 0xf0
'Li ', # 0xf1
'Hui ', # 0xf2
'Bi ', # 0xf3
'Di ', # 0xf4
'Cong ', # 0xf5
'Yan ', # 0xf6
'Peng ', # 0xf7
'Sen ', # 0xf8
'Zhuan ', # 0xf9
'Pai ', # 0xfa
'Piao ', # 0xfb
'Dou ', # 0xfc
'Yu ', # 0xfd
'Mie ', # 0xfe
'Zhuan ', # 0xff
)
|
Distrotech/intellij-community
|
refs/heads/master
|
python/testData/refactoring/invertBoolean/parameter.before.py
|
83
|
def foo(v<caret>ar=True):
var1 = True
return var
|
tinkerthaler/odoo
|
refs/heads/8.0
|
openerp/tests/common.py
|
60
|
# -*- coding: utf-8 -*-
"""
The module :mod:`openerp.tests.common` provides unittest2 test cases and a few
helpers and classes to write tests.
"""
import errno
import glob
import json
import logging
import os
import select
import subprocess
import threading
import time
import unittest2
import urllib2
import xmlrpclib
from contextlib import contextmanager
from datetime import datetime, timedelta
import werkzeug
import openerp
from openerp import api
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
# The openerp library is supposed already configured.
ADDONS_PATH = openerp.tools.config['addons_path']
HOST = '127.0.0.1'
PORT = openerp.tools.config['xmlrpc_port']
DB = openerp.tools.config['db_name']
# If the database name is not provided on the command-line,
# use the one on the thread (which means if it is provided on
# the command-line, this will break when installing another
# database from XML-RPC).
if not DB and hasattr(threading.current_thread(), 'dbname'):
DB = threading.current_thread().dbname
# Useless constant, tests are aware of the content of demo data
ADMIN_USER_ID = openerp.SUPERUSER_ID
def at_install(flag):
""" Sets the at-install state of a test, the flag is a boolean specifying
whether the test should (``True``) or should not (``False``) run during
module installation.
By default, tests are run right after installing the module, before
starting the installation of the next module.
"""
def decorator(obj):
obj.at_install = flag
return obj
return decorator
def post_install(flag):
""" Sets the post-install state of a test. The flag is a boolean
specifying whether the test should or should not run after a set of
module installations.
By default, tests are *not* run after installation of all modules in the
current installation set.
"""
def decorator(obj):
obj.post_install = flag
return obj
return decorator
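# Hypothetical usage sketch (comments only; the class name is illustrative):
# combine the two decorators to defer a slow test until every module in the
# current installation set has been installed.
#
#   @at_install(False)
#   @post_install(True)
#   class TestHeavyReports(TransactionCase):
#       ...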
class BaseCase(unittest2.TestCase):
"""
Subclass of TestCase for common OpenERP-specific code.
This class is abstract and expects self.registry, self.cr and self.uid to be
initialized by subclasses.
"""
def cursor(self):
return self.registry.cursor()
def ref(self, xid):
""" Returns database ID for the provided :term:`external identifier`,
shortcut for ``get_object_reference``
:param xid: fully-qualified :term:`external identifier`, in the form
:samp:`{module}.{identifier}`
:raise: ValueError if not found
:returns: registered id
"""
assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
module, xid = xid.split('.')
_, id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module, xid)
return id
def browse_ref(self, xid):
""" Returns a record object for the provided
:term:`external identifier`
:param xid: fully-qualified :term:`external identifier`, in the form
:samp:`{module}.{identifier}`
:raise: ValueError if not found
:returns: :class:`~openerp.models.BaseModel`
"""
assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
module, xid = xid.split('.')
return self.registry('ir.model.data').get_object(self.cr, self.uid, module, xid)
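    # Illustrative sketch (comments only; the external identifier is hypothetical):
    #
    #   partner_id = self.ref('base.res_partner_1')       # -> integer database id
    #   partner = self.browse_ref('base.res_partner_1')   # -> BaseModel record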
@contextmanager
def _assertRaises(self, exception):
""" Context manager that clears the environment upon failure. """
with super(BaseCase, self).assertRaises(exception) as cm:
with self.env.clear_upon_failure():
yield cm
def assertRaises(self, exception, func=None, *args, **kwargs):
if func:
with self._assertRaises(exception):
func(*args, **kwargs)
else:
return self._assertRaises(exception)
class TransactionCase(BaseCase):
""" TestCase in which each test method is run in its own transaction,
and with its own cursor. The transaction is rolled back and the cursor
is closed after each test.
"""
def setUp(self):
self.registry = RegistryManager.get(DB)
#: current transaction's cursor
self.cr = self.cursor()
self.uid = openerp.SUPERUSER_ID
#: :class:`~openerp.api.Environment` for the current test case
self.env = api.Environment(self.cr, self.uid, {})
def tearDown(self):
# rollback and close the cursor, and reset the environments
self.env.reset()
self.cr.rollback()
self.cr.close()
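# Hypothetical usage sketch (comments only; model and field names are
# illustrative): each test method below runs in its own transaction and is
# rolled back afterwards.
#
#   class TestPartnerName(TransactionCase):
#       def test_create(self):
#           partner = self.env['res.partner'].create({'name': 'A name'})
#           self.assertEqual(partner.name, 'A name')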
class SingleTransactionCase(BaseCase):
""" TestCase in which all test methods are run in the same transaction,
the transaction is started with the first test method and rolled back at
the end of the last.
"""
@classmethod
def setUpClass(cls):
cls.registry = RegistryManager.get(DB)
cls.cr = cls.registry.cursor()
cls.uid = openerp.SUPERUSER_ID
cls.env = api.Environment(cls.cr, cls.uid, {})
@classmethod
def tearDownClass(cls):
# rollback and close the cursor, and reset the environments
cls.env.reset()
cls.cr.rollback()
cls.cr.close()
class RedirectHandler(urllib2.HTTPRedirectHandler):
"""
HTTPRedirectHandler is predicated upon HTTPErrorProcessor being used and
    works by intercepting 3xx "errors".
    Inherit from it to handle 3xx non-error responses instead, as we're not
using the error processor
"""
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if 300 <= code < 400:
return self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HttpCase(TransactionCase):
""" Transactional HTTP TestCase with url_open and phantomjs helpers.
"""
def __init__(self, methodName='runTest'):
super(HttpCase, self).__init__(methodName)
# v8 api with correct xmlrpc exception handling.
self.xmlrpc_url = url_8 = 'http://%s:%d/xmlrpc/2/' % (HOST, PORT)
self.xmlrpc_common = xmlrpclib.ServerProxy(url_8 + 'common')
self.xmlrpc_db = xmlrpclib.ServerProxy(url_8 + 'db')
self.xmlrpc_object = xmlrpclib.ServerProxy(url_8 + 'object')
def setUp(self):
super(HttpCase, self).setUp()
self.registry.enter_test_mode()
# setup a magic session_id that will be rollbacked
self.session = openerp.http.root.session_store.new()
self.session_id = self.session.sid
self.session.db = DB
openerp.http.root.session_store.save(self.session)
# setup an url opener helper
self.opener = urllib2.OpenerDirector()
self.opener.add_handler(urllib2.UnknownHandler())
self.opener.add_handler(urllib2.HTTPHandler())
self.opener.add_handler(urllib2.HTTPSHandler())
self.opener.add_handler(urllib2.HTTPCookieProcessor())
self.opener.add_handler(RedirectHandler())
self.opener.addheaders.append(('Cookie', 'session_id=%s' % self.session_id))
def tearDown(self):
self.registry.leave_test_mode()
super(HttpCase, self).tearDown()
def url_open(self, url, data=None, timeout=10):
if url.startswith('/'):
url = "http://localhost:%s%s" % (PORT, url)
return self.opener.open(url, data, timeout)
def authenticate(self, user, password):
if user is not None:
url = '/login?%s' % werkzeug.urls.url_encode({'db': DB,'login': user, 'key': password})
auth = self.url_open(url)
assert auth.getcode() < 400, "Auth failure %d" % auth.getcode()
def phantom_poll(self, phantom, timeout):
""" Phantomjs Test protocol.
Use console.log in phantomjs to output test results:
- for a success: console.log("ok")
- for an error: console.log("error")
Other lines are relayed to the test log.
"""
t0 = datetime.now()
td = timedelta(seconds=timeout)
buf = bytearray()
while True:
# timeout
self.assertLess(datetime.now() - t0, td,
"PhantomJS tests should take less than %s seconds" % timeout)
# read a byte
try:
ready, _, _ = select.select([phantom.stdout], [], [], 0.5)
except select.error, e:
# In Python 2, select.error has no relation to IOError or
# OSError, and no errno/strerror/filename, only a pair of
# unnamed arguments (matching errno and strerror)
err, _ = e.args
if err == errno.EINTR:
continue
raise
if ready:
s = phantom.stdout.read(1)
if not s:
break
buf.append(s)
# process lines
if '\n' in buf:
line, buf = buf.split('\n', 1)
line = str(line)
# relay everything from console.log, even 'ok' or 'error...' lines
_logger.info("phantomjs: %s", line)
if line == "ok":
break
if line.startswith("error"):
line_ = line[6:]
                    # when an error occurs the execution stack may be sent as JSON
try:
line_ = json.loads(line_)
except ValueError:
pass
self.fail(line_ or "phantomjs test failed")
def phantom_run(self, cmd, timeout):
_logger.info('phantom_run executing %s', ' '.join(cmd))
ls_glob = os.path.expanduser('~/.qws/share/data/Ofi Labs/PhantomJS/http_localhost_%s.*'%PORT)
for i in glob.glob(ls_glob):
_logger.info('phantomjs unlink localstorage %s', i)
os.unlink(i)
try:
phantom = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None)
except OSError:
raise unittest2.SkipTest("PhantomJS not found")
try:
self.phantom_poll(phantom, timeout)
finally:
# kill phantomjs if phantom.exit() wasn't called in the test
if phantom.poll() is None:
phantom.terminate()
phantom.wait()
self._wait_remaining_requests()
_logger.info("phantom_run execution finished")
def _wait_remaining_requests(self):
t0 = int(time.time())
for thread in threading.enumerate():
if thread.name.startswith('openerp.service.http.request.'):
while thread.isAlive():
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
t1 = int(time.time())
if t0 != t1:
_logger.info('remaining requests')
openerp.tools.misc.dumpstacks()
t0 = t1
def phantom_jsfile(self, jsfile, timeout=60, **kw):
options = {
'timeout' : timeout,
'port': PORT,
'db': DB,
'session_id': self.session_id,
}
options.update(kw)
phantomtest = os.path.join(os.path.dirname(__file__), 'phantomtest.js')
# phantom.args[0] == phantomtest path
# phantom.args[1] == options
cmd = [
'phantomjs',
jsfile, phantomtest, json.dumps(options)
]
self.phantom_run(cmd, timeout)
def phantom_js(self, url_path, code, ready="window", login=None, timeout=60, **kw):
""" Test js code running in the browser
        - optionally log in as 'login'
- load page given by url_path
- wait for ready object to be available
- eval(code) inside the page
To signal success test do:
console.log('ok')
To signal failure do:
console.log('error')
        If neither is done before the timeout, the test fails.
"""
options = {
'port': PORT,
'db': DB,
'url_path': url_path,
'code': code,
'ready': ready,
'timeout' : timeout,
'login' : login,
'session_id': self.session_id,
}
options.update(kw)
options.setdefault('password', options.get('login'))
phantomtest = os.path.join(os.path.dirname(__file__), 'phantomtest.js')
cmd = ['phantomjs', phantomtest, json.dumps(options)]
self.phantom_run(cmd, timeout)
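    # Hypothetical usage sketch (comments only; the url, code and login are
    # placeholders): run a snippet in the page and report success through the
    # console.log protocol described above.
    #
    #   self.phantom_js('/web', "console.log('ok')", login='admin')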
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
RAtechntukan/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/movie/providers/trailer/mechanize/_util.py
|
123
|
"""Utility functions and date/time routines.
Copyright 2002-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re
import time
import warnings
class ExperimentalWarning(UserWarning):
pass
def experimental(message):
warnings.warn(message, ExperimentalWarning, stacklevel=3)
def hide_experimental_warnings():
warnings.filterwarnings("ignore", category=ExperimentalWarning)
def reset_experimental_warnings():
warnings.filterwarnings("default", category=ExperimentalWarning)
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def hide_deprecations():
warnings.filterwarnings("ignore", category=DeprecationWarning)
def reset_deprecations():
warnings.filterwarnings("default", category=DeprecationWarning)
def write_file(filename, data):
f = open(filename, "wb")
try:
f.write(data)
finally:
f.close()
def get1(sequence):
assert len(sequence) == 1
return sequence[0]
def isstringlike(x):
try: x+""
except: return False
else: return True
## def caller():
## try:
## raise SyntaxError
## except:
## import sys
## return sys.exc_traceback.tb_frame.f_back.f_back.f_code.co_name
from calendar import timegm
# Date/time conversion routines for formats used by the HTTP protocol.
EPOCH = 1970
def my_timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
months_lower = []
for month in months: months_lower.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
days[wday], mday, months[mon-1], year, hour, min, sec)
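# Illustrative sketch (comments only): both helpers format a seconds-since-epoch
# value; for t = 0 (1 January 1970, UTC) one gets:
#
#   time2isoz(0)       # -> '1970-01-01 00:00:00Z'
#   time2netscape(0)   # -> 'Thu 01-Jan-1970 00:00:00 GMT'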
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
timezone_re = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = months_lower.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = my_timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
wkday_re = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
loose_http_re = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = strict_re.search(text)
if m:
g = m.groups()
mon = months_lower.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return my_timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = wkday_re.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = loose_http_re.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
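# Illustrative sketch (comments only): http2time() returns seconds since the
# epoch for the formats listed above, and None when parsing fails, e.g.:
#
#   http2time("Wed, 09 Feb 1994 22:23:32 GMT")   # -> 760832612
#   http2time("not a date at all")               # -> None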
iso_re = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = iso_re.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
|
xiaoyaozi5566/DiamondCache
|
refs/heads/master
|
tests/configs/tsunami-simple-atomic.py
|
14
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
import FSConfig
# --------------------
# Base L1 Cache
# ====================
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 8
is_top_level = True
# ----------------------
# Base L2 Cache
# ----------------------
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
# ---------------------
# I/O Cache
# ---------------------
class IOCache(BaseCache):
assoc = 8
block_size = 64
latency = '50ns'
mshrs = 20
size = '1kB'
tgts_per_mshr = 12
addr_ranges = [AddrRange(0, size='8GB')]
forward_snoops = False
is_top_level = True
#cpu
cpu = AtomicSimpleCPU(cpu_id=0)
#the system
system = FSConfig.makeLinuxAlphaSystem('atomic')
system.iocache = IOCache()
system.iocache.cpu_side = system.iobus.master
system.iocache.mem_side = system.membus.slave
system.cpu = cpu
#create the l1/l2 bus
system.toL2Bus = CoherentBus()
#connect up the l2 cache
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.master
system.l2c.mem_side = system.membus.slave
#connect up the cpu and l1s
cpu.addPrivateSplitL1Caches(L1(size = '32kB', assoc = 1),
L1(size = '32kB', assoc = 4))
# create the interrupt controller
cpu.createInterruptController()
# connect cpu level-1 caches to shared level-2 cache
cpu.connectAllPorts(system.toL2Bus, system.membus)
cpu.clock = '2GHz'
root = Root(full_system=True, system=system)
m5.ticks.setGlobalFrequency('1THz')
|
pozdnyakov/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/telemetry/core/possible_browser.py
|
33
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class PossibleBrowser(object):
"""A browser that can be controlled.
  Call Create() to launch the browser and begin manipulating it.
"""
def __init__(self, browser_type, options):
self._browser_type = browser_type
self._options = options
def __repr__(self):
return 'PossibleBrowser(browser_type=%s)' % self.browser_type
@property
def browser_type(self):
return self._browser_type
@property
def options(self):
return self._options
def Create(self):
raise NotImplementedError()
def SupportsOptions(self, options):
"""Tests for extension support."""
raise NotImplementedError()
|
RitwikGupta/pattern
|
refs/heads/master
|
pattern/server/cherrypy/cherrypy/wsgiserver/__init__.py
|
238
|
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import sys
if sys.version_info < (3, 0):
from wsgiserver2 import *
else:
# Le sigh. Boo for backward-incompatible syntax.
exec('from .wsgiserver3 import *')
|
robertmattmueller/sdac-compiler
|
refs/heads/master
|
sympy/plotting/plot_implicit.py
|
12
|
"""Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval
arithmetic. It is also possible to specify to use the fall back
algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, Greaterthan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
#recursive call refinepixels which subdivides the intervals which are
#neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
- ``title`` string .The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
    plot_implicit, by default, uses interval arithmetic to plot functions. If
    the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid with a fixed number of points. By
    setting adaptive to False, you can force plot_implicit to use the mesh
    grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic fails to plot with small line width.
Examples:
=========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
        Recursively expands the arguments of a Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
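# --- Editor's addition: a hedged, standalone illustration of the sign
# thresholding used by ImplicitSeries._get_meshes_grid above.  This is a
# minimal sketch against plain numpy/matplotlib, not the sympy plotting
# backend; the expression, ranges and point count are arbitrary choices.
def _example_meshgrid_region():
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.linspace(-3, 3, 300)
    y = np.linspace(-3, 3, 300)
    x_grid, y_grid = np.meshgrid(x, y)
    z = y_grid - x_grid**2              # the inequality y > x**2 rewritten as z > 0
    z = np.where(z > 0, 1, -1)          # collapse to +1/-1, mirroring _get_meshes_grid
    plt.contourf(x, y, z, levels=[0, 1])  # fill only the region where the inequality holds
    plt.show()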
|
mickburgs/taxi-sam
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
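# --- Editor's addition: a hedged usage sketch for Writer; not part of the
# original gyp module.  The rule name, command line, and file paths below are
# hypothetical placeholders.
def _example_write_tool_file():
    writer = Writer('example.rules', 'Example Rules')
    writer.AddCustomBuildRule(
        name='CompileIdl',
        cmd='midl.exe /out "$(OutDir)" "$(InputPath)"',
        description='Compiling IDL $(InputPath)',
        additional_dependencies=['common.idl'],
        outputs=['$(OutDir)\\$(InputName).h'],
        extensions=['.idl'])
    writer.WriteIfChanged()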
|
sharkykh/SickRage
|
refs/heads/develop
|
lib/twilio/rest/resources/trunking/trunks.py
|
24
|
from .. import NextGenInstanceResource, NextGenListResource
class Trunk(NextGenInstanceResource):
"""
A Trunk resource.
    See the `SIP Trunking API reference
    <https://www.twilio.com/docs/sip-trunking/rest/trunks>`_
for more information.
.. attribute:: sid
The unique ID for this Trunk.
"""
def delete(self):
"""
Deletes a Trunk.
"""
return self.parent.delete_instance(self.name)
def update(self, **kwargs):
"""
Updates a Trunk.
"""
return self.parent.update_instance(self.name, **kwargs)
class Trunks(NextGenListResource):
""" A list of Trunk resources """
name = "Trunks"
instance = Trunk
key = "trunks"
def list(self, **kwargs):
"""
Retrieve the list of Trunk resources.
:param Page: The subset of results that needs to be fetched
:param PageSize: The size of the Page that needs to be fetched
"""
return super(Trunks, self).list(**kwargs)
def create(self, **kwargs):
"""
Creates a Trunk.
"""
return self.create_instance(kwargs)
def update(self, sid, body):
"""
Updates a Trunk.
:param sid: A human readable 34 character unique identifier
:param body: Request body
"""
return self.update_instance(sid, body)
def delete(self, sid):
"""
Deletes a Trunk.
:param sid: A human readable 34 character unique identifier
"""
return self.delete_instance(sid)
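# --- Editor's addition: a hedged round-trip sketch.  It assumes ``trunks`` is
# a Trunks list resource already bound to an authenticated REST client, and
# the ``friendly_name`` keyword is a plausible but unverified parameter.
def _example_trunk_roundtrip(trunks):
    trunk = trunks.create(friendly_name='editor-demo')
    trunks.update(trunk.sid, {'friendly_name': 'editor-demo-renamed'})
    for item in trunks.list():
        print(item.sid)
    trunks.delete(trunk.sid)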
|
blademainer/intellij-community
|
refs/heads/master
|
python/lib/Lib/distutils/ccompiler.py
|
81
|
"""distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: ccompiler.py 46331 2006-05-26 14:07:23Z bob.ippolito $"
import sys, os, re
from types import *
from copy import copy
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
    # * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
    # those methods ('object_filenames()' et al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
# __init__ ()
def set_executables (self, **args):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in args.keys():
if not self.executables.has_key(key):
raise ValueError, \
"unknown executable '%s' for class %s" % \
(key, self.__class__.__name__)
self.set_executable(key, args[key])
# set_executables ()
def set_executable(self, key, value):
if type(value) is StringType:
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro (self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i = i + 1
return None
def _check_macro_definitions (self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
        definition, i.e. either a (name, value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (type (defn) is TupleType and
(len (defn) == 1 or
(len (defn) == 2 and
(type (defn[1]) is StringType or defn[1] is None))) and
type (defn[0]) is StringType):
raise TypeError, \
("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)"
# -- Bookkeeping methods -------------------------------------------
def define_macro (self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
defn = (name, value)
self.macros.append (defn)
def undefine_macro (self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append (undefn)
def add_include_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append (dir)
def set_include_dirs (self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = copy (dirs)
def add_library (self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append (libname)
def set_libraries (self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = copy (libnames)
def add_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append (dir)
def set_library_dirs (self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = copy (dirs)
def add_runtime_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append (dir)
def set_runtime_library_dirs (self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = copy (dirs)
def add_link_object (self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append (object)
def set_link_objects (self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = copy (objects)
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile.
Merges _fix_compile_args() and _prep_compile().
"""
if outdir is None:
outdir = self.output_dir
elif type(outdir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type(macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if incdirs is None:
incdirs = self.include_dirs
elif type(incdirs) in (ListType, TupleType):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources,
strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
# XXX should redo this code to eliminate skip_source entirely.
# XXX instead create build and issue skip messages inline
if self.force:
skip_source = {} # rebuild everything
for source in sources:
skip_source[source] = 0
elif depends is None:
# If depends is None, figure out which source files we
# have to recompile according to a simplistic check. We
# just compare the source and object file, no deep
# dependency checking involving header files.
skip_source = {} # rebuild everything
for source in sources: # no wait, rebuild nothing
skip_source[source] = 1
n_sources, n_objects = newer_pairwise(sources, objects)
for source in n_sources: # no really, only rebuild what's
skip_source[source] = 0 # out-of-date
else:
# If depends is a list of files, then do a different
# simplistic check. Assume that each object depends on
# its source and all files in the depends list.
skip_source = {}
# L contains all the depends plus a spot at the end for a
# particular source file
L = depends[:] + [None]
for i in range(len(objects)):
source = sources[i]
L[-1] = source
if newer_group(L, objects[i]):
skip_source[source] = 0
else:
skip_source[source] = 1
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
if skip_source[src]:
log.debug("skipping %s (%s up-to-date)", src, obj)
else:
build[obj] = src, ext
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, emxccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args (self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type (macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if include_dirs is None:
include_dirs = self.include_dirs
elif type (include_dirs) in (ListType, TupleType):
include_dirs = list (include_dirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
return output_dir, macros, include_dirs
# _fix_compile_args ()
    def _prep_compile(self, sources, output_dir, depends=None):
        """Decide which source files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
if self.force:
skip_source = {} # rebuild everything
for source in sources:
skip_source[source] = 0
elif depends is None:
# If depends is None, figure out which source files we
# have to recompile according to a simplistic check. We
# just compare the source and object file, no deep
# dependency checking involving header files.
skip_source = {} # rebuild everything
for source in sources: # no wait, rebuild nothing
skip_source[source] = 1
n_sources, n_objects = newer_pairwise(sources, objects)
for source in n_sources: # no really, only rebuild what's
skip_source[source] = 0 # out-of-date
else:
# If depends is a list of files, then do a different
# simplistic check. Assume that each object depends on
# its source and all files in the depends list.
skip_source = {}
# L contains all the depends plus a spot at the end for a
# particular source file
L = depends[:] + [None]
for i in range(len(objects)):
source = sources[i]
L[-1] = source
if newer_group(L, objects[i]):
skip_source[source] = 0
else:
skip_source[source] = 1
return objects, skip_source
# _prep_compile ()
def _fix_object_args (self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if type (objects) not in (ListType, TupleType):
raise TypeError, \
"'objects' must be a list or tuple of strings"
objects = list (objects)
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
return (objects, output_dir)
def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif type (libraries) in (ListType, TupleType):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError, \
"'libraries' (if supplied) must be a list of strings"
if library_dirs is None:
library_dirs = self.library_dirs
elif type (library_dirs) in (ListType, TupleType):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError, \
"'library_dirs' (if supplied) must be a list of strings"
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif type (runtime_library_dirs) in (ListType, TupleType):
runtime_library_dirs = (list (runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError, \
"'runtime_library_dirs' (if supplied) " + \
"must be a list of strings"
return (libraries, library_dirs, runtime_library_dirs)
# _fix_lib_args ()
def _need_link (self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return 1
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
# _need_link ()
def detect_language (self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if type(sources) is not ListType:
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# detect_language ()
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/ undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation- dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
        command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile 'src' to produce 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib (self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object (self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable (self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
    def library_option (self, lib):
        """Return the compiler option to add 'lib' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname,
includes=None,
include_dirs=None,
libraries=None,
library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
main (int argc, char **argv) {
%s();
}
""" % funcname)
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % (ext, src_name)
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib"):
raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split (libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
# -- Utility methods -----------------------------------------------
def announce (self, msg, level=1):
log.debug(msg)
def debug_print (self, msg):
from distutils.debug import DEBUG
if DEBUG:
print msg
def warn (self, msg):
sys.stderr.write ("warning: %s\n" % msg)
def execute (self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn (self, cmd):
spawn (cmd, dry_run=self.dry_run)
def move_file (self, src, dst):
return move_file (src, dst, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
mkpath (name, mode, self.dry_run)
# class CCompiler
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
('os2emx', 'emx'),
('java.*', 'jython'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
('mac', 'mwerks'),
)
def get_default_compiler(osname=None, platform=None):
""" Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
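# --- Editor's addition: a hedged self-check showing typical results of
# get_default_compiler() for a few (os.name, sys.platform) pairs.
def _example_default_compilers():
    assert get_default_compiler('posix', 'linux2') == 'unix'
    assert get_default_compiler('nt', 'win32') == 'msvc'
    assert get_default_compiler('posix', 'cygwin') == 'unix'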
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
'mwerks': ('mwerkscompiler', 'MWerksCompiler',
"MetroWerks CodeWarrior"),
'emx': ('emxccompiler', 'EMXCCompiler',
"EMX port of GNU C Compiler for OS/2"),
'jython': ('jythoncompiler', 'JythonCompiler',
"Compiling is not supported on Jython"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError, msg
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError, \
"can't compile C/C++ code: unable to load module '%s'" % \
module_name
except KeyError:
raise DistutilsModuleError, \
("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name)
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass (None, dry_run, force)
def gen_preprocess_options (macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (type (macro) is TupleType and
1 <= len (macro) <= 2):
raise TypeError, \
("bad macro definition '%s': " +
"each element of 'macros' list must be a 1- or 2-tuple") % \
macro
if len (macro) == 1: # undefine this macro
pp_opts.append ("-U%s" % macro[0])
elif len (macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append ("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append ("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append ("-I%s" % dir)
return pp_opts
# gen_preprocess_options ()
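# --- Editor's addition: a hedged example of the option strings produced by
# gen_preprocess_options(); the macro names and directories are arbitrary.
def _example_preprocess_options():
    macros = [('NDEBUG', None), ('VERSION', '"1.0"'), ('OLD_FLAG',)]
    include_dirs = ['include', '/usr/local/include']
    # Expected: ['-DNDEBUG', '-DVERSION="1.0"', '-UOLD_FLAG',
    #            '-Iinclude', '-I/usr/local/include']
    return gen_preprocess_options(macros, include_dirs)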
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append (compiler.library_dir_option (dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option (dir)
if type(opt) is ListType:
lib_opts = lib_opts + opt
else:
lib_opts.append (opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split (lib)
if lib_dir:
lib_file = compiler.find_library_file ([lib_dir], lib_name)
if lib_file:
lib_opts.append (lib_file)
else:
compiler.warn ("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append (compiler.library_option (lib))
return lib_opts
# gen_lib_options ()
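# --- Editor's addition: a hedged end-to-end sketch tying new_compiler() to the
# compile/link methods above.  It assumes a working C toolchain and a local
# source file named 'demo.c'; both are hypothetical.
def _example_build_demo():
    cc = new_compiler()
    cc.define_macro('DEMO_BUILD', '1')
    cc.add_include_dir('include')
    objects = cc.compile(['demo.c'], output_dir='build')
    cc.link_executable(objects, 'demo', output_dir='build')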
|
minhphung171093/GreenERP_V7
|
refs/heads/master
|
openerp/addons/account_check_writing/account_voucher.py
|
33
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
from openerp.tools.amount_to_text_en import amount_to_text
from lxml import etree
class account_voucher(osv.osv):
_inherit = 'account.voucher'
def _make_journal_search(self, cr, uid, ttype, context=None):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
if context.get('write_check',False) :
return journal_pool.search(cr, uid, [('allow_check_writing', '=', True)], limit=1)
return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
_columns = {
'amount_in_word' : fields.char("Amount in Word" , size=128, readonly=True, states={'draft':[('readonly',False)]}),
'allow_check' : fields.related('journal_id', 'allow_check_writing', type='boolean', string='Allow Check Writing'),
'number': fields.char('Number', size=32),
}
def _amount_to_text(self, cr, uid, amount, currency_id, context=None):
# Currency complete name is not available in res.currency model
# Exceptions done here (EUR, USD, BRL) cover 75% of cases
# For other currencies, display the currency code
currency = self.pool['res.currency'].browse(cr, uid, currency_id, context=context)
if currency.name.upper() == 'EUR':
currency_name = 'Euro'
elif currency.name.upper() == 'USD':
currency_name = 'Dollars'
elif currency.name.upper() == 'BRL':
currency_name = 'reais'
else:
currency_name = currency.name
#TODO : generic amount_to_text is not ready yet, otherwise language (and country) and currency can be passed
#amount_in_word = amount_to_text(amount, context=context)
return amount_to_text(amount, currency=currency_name)
    def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
        """ Inherited - add amount_in_word and allow_check_writing in returned value dictionary """
if not context:
context = {}
default = super(account_voucher, self).onchange_amount(cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=context)
if 'value' in default:
amount = 'amount' in default['value'] and default['value']['amount'] or amount
amount_in_word = self._amount_to_text(cr, uid, amount, currency_id, context=context)
default['value'].update({'amount_in_word':amount_in_word})
if journal_id:
allow_check_writing = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).allow_check_writing
default['value'].update({'allow_check':allow_check_writing})
return default
def print_check(self, cr, uid, ids, context=None):
if not ids:
return {}
check_layout_report = {
'top' : 'account.print.check.top',
'middle' : 'account.print.check.middle',
'bottom' : 'account.print.check.bottom',
}
check_layout = self.browse(cr, uid, ids[0], context=context).company_id.check_layout
return {
'type': 'ir.actions.report.xml',
'report_name':check_layout_report[check_layout],
'datas': {
'model':'account.voucher',
'id': ids and ids[0] or False,
'ids': ids and ids or [],
'report_type': 'pdf'
},
'nodestroy': True
}
def create(self, cr, uid, vals, context=None):
if vals.get('amount') and vals.get('journal_id') and 'amount_in_word' not in vals:
vals['amount_in_word'] = self._amount_to_text(cr, uid, vals['amount'], vals.get('currency_id') or \
self.pool['account.journal'].browse(cr, uid, vals['journal_id'], context=context).currency.id or \
self.pool['res.company'].browse(cr, uid, vals['company_id']).currency_id.id, context=context)
return super(account_voucher, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('amount') and vals.get('journal_id') and 'amount_in_word' not in vals:
vals['amount_in_word'] = self._amount_to_text(cr, uid, vals['amount'], vals.get('currency_id') or \
self.pool['account.journal'].browse(cr, uid, vals['journal_id'], context=context).currency.id or \
self.pool['res.company'].browse(cr, uid, vals['company_id']).currency_id.id, context=context)
return super(account_voucher, self).write(cr, uid, ids, vals, context=context)
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
"""
        Add domain 'allow_check_writing = True' on journal_id field and remove 'widget = selection' on the same
        field because a dynamic domain is not allowed on such a widget
"""
if not context: context = {}
res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='journal_id']")
if context.get('write_check', False) :
for node in nodes:
node.set('domain', "[('type', '=', 'bank'), ('allow_check_writing','=',True)]")
node.set('widget', '')
res['arch'] = etree.tostring(doc)
return res
account_voucher()
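# --- Editor's addition: a hedged, ORM-free restatement of the currency-name
# mapping applied by _amount_to_text() above, so the rule can be read (and
# tried) without an OpenERP registry.
def _example_currency_label(currency_code):
    labels = {'EUR': 'Euro', 'USD': 'Dollars', 'BRL': 'reais'}
    return labels.get(currency_code.upper(), currency_code)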
|
gcode-mirror/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Options.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,tempfile,optparse,sys,re
from waflib import Logs,Utils,Context
cmds='distclean configure build install clean uninstall check dist distcheck'.split()
options={}
commands=[]
lockfile=os.environ.get('WAFLOCK','.lock-waf_%s_build'%sys.platform)
try:cache_global=os.path.abspath(os.environ['WAFCACHE'])
except KeyError:cache_global=''
platform=Utils.unversioned_sys_platform()
class opt_parser(optparse.OptionParser):
def __init__(self,ctx):
optparse.OptionParser.__init__(self,conflict_handler="resolve",version='waf %s (%s)'%(Context.WAFVERSION,Context.WAFREVISION))
self.formatter.width=Logs.get_term_cols()
p=self.add_option
self.ctx=ctx
jobs=ctx.jobs()
p('-j','--jobs',dest='jobs',default=jobs,type='int',help='amount of parallel jobs (%r)'%jobs)
p('-k','--keep',dest='keep',default=0,action='count',help='keep running happily even if errors are found')
p('-v','--verbose',dest='verbose',default=0,action='count',help='verbosity level -v -vv or -vvv [default: 0]')
p('--nocache',dest='nocache',default=False,action='store_true',help='ignore the WAFCACHE (if set)')
p('--zones',dest='zones',default='',action='store',help='debugging zones (task_gen, deps, tasks, etc)')
gr=optparse.OptionGroup(self,'configure options')
self.add_option_group(gr)
gr.add_option('-o','--out',action='store',default='',help='build dir for the project',dest='out')
gr.add_option('-t','--top',action='store',default='',help='src dir for the project',dest='top')
default_prefix=os.environ.get('PREFIX')
if not default_prefix:
if platform=='win32':
d=tempfile.gettempdir()
default_prefix=d[0].upper()+d[1:]
else:
default_prefix='/usr/local/'
gr.add_option('--prefix',dest='prefix',default=default_prefix,help='installation prefix [default: %r]'%default_prefix)
gr.add_option('--download',dest='download',default=False,action='store_true',help='try to download the tools if missing')
gr=optparse.OptionGroup(self,'build and install options')
self.add_option_group(gr)
gr.add_option('-p','--progress',dest='progress_bar',default=0,action='count',help='-p: progress bar; -pp: ide output')
gr.add_option('--targets',dest='targets',default='',action='store',help='task generators, e.g. "target1,target2"')
gr=optparse.OptionGroup(self,'step options')
self.add_option_group(gr)
gr.add_option('--files',dest='files',default='',action='store',help='files to process, by regexp, e.g. "*/main.c,*/test/main.o"')
default_destdir=os.environ.get('DESTDIR','')
gr=optparse.OptionGroup(self,'install/uninstall options')
self.add_option_group(gr)
gr.add_option('--destdir',help='installation root [default: %r]'%default_destdir,default=default_destdir,dest='destdir')
gr.add_option('-f','--force',dest='force',default=False,action='store_true',help='force file installation')
gr.add_option('--distcheck-args',help='arguments to pass to distcheck',default=None,action='store')
def get_usage(self):
cmds_str={}
for cls in Context.classes:
if not cls.cmd or cls.cmd=='options':
continue
s=cls.__doc__ or''
cmds_str[cls.cmd]=s
if Context.g_module:
for(k,v)in Context.g_module.__dict__.items():
if k in['options','init','shutdown']:
continue
if type(v)is type(Context.create_context):
if v.__doc__ and not k.startswith('_'):
cmds_str[k]=v.__doc__
just=0
for k in cmds_str:
just=max(just,len(k))
lst=[' %s: %s'%(k.ljust(just),v)for(k,v)in cmds_str.items()]
lst.sort()
ret='\n'.join(lst)
return'''waf [commands] [options]
Main commands (example: ./waf build -j4)
%s
'''%ret
class OptionsContext(Context.Context):
cmd='options'
fun='options'
def __init__(self,**kw):
super(OptionsContext,self).__init__(**kw)
self.parser=opt_parser(self)
self.option_groups={}
def jobs(self):
count=int(os.environ.get('JOBS',0))
if count<1:
if'NUMBER_OF_PROCESSORS'in os.environ:
count=int(os.environ.get('NUMBER_OF_PROCESSORS',1))
else:
if hasattr(os,'sysconf_names'):
if'SC_NPROCESSORS_ONLN'in os.sysconf_names:
count=int(os.sysconf('SC_NPROCESSORS_ONLN'))
elif'SC_NPROCESSORS_CONF'in os.sysconf_names:
count=int(os.sysconf('SC_NPROCESSORS_CONF'))
if not count and os.name not in('nt','java'):
try:
tmp=self.cmd_and_log(['sysctl','-n','hw.ncpu'],quiet=0)
except Exception:
pass
else:
if re.match('^[0-9]+$',tmp):
count=int(tmp)
if count<1:
count=1
elif count>1024:
count=1024
return count
def add_option(self,*k,**kw):
return self.parser.add_option(*k,**kw)
def add_option_group(self,*k,**kw):
try:
gr=self.option_groups[k[0]]
except KeyError:
gr=self.parser.add_option_group(*k,**kw)
self.option_groups[k[0]]=gr
return gr
def get_option_group(self,opt_str):
try:
return self.option_groups[opt_str]
except KeyError:
for group in self.parser.option_groups:
if group.title==opt_str:
return group
return None
def parse_args(self,_args=None):
global options,commands
(options,leftover_args)=self.parser.parse_args(args=_args)
commands=leftover_args
if options.destdir:
options.destdir=os.path.abspath(os.path.expanduser(options.destdir))
if options.verbose>=1:
self.load('errcheck')
def execute(self):
super(OptionsContext,self).execute()
self.parse_args()
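# Hedged illustration (not part of the generated waf file): jobs() above picks
# a parallel-job count from the JOBS environment variable, then Windows'
# NUMBER_OF_PROCESSORS, then POSIX sysconf, then a sysctl fallback, and clamps
# the result to the range 1..1024. A rough stdlib-only equivalent:
def _guess_jobs():
	try:
		count=int(os.environ.get('JOBS',0))
	except ValueError:
		count=0
	if count<1:
		import multiprocessing
		try:
			count=multiprocessing.cpu_count()
		except NotImplementedError:
			count=1
	return max(1,min(count,1024))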
|
xdegenne/cloudify-plugins-common
|
refs/heads/wfworks
|
cloudify/manager.py
|
2
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import urllib2
import utils
from cloudify_rest_client import CloudifyClient
from cloudify.exceptions import HttpException, NonRecoverableError
class NodeInstance(object):
"""
Represents a deployment node instance.
An instance of this class contains runtime information retrieved
from Cloudify's runtime storage as well as the node's state.
"""
def __init__(self,
node_instance_id,
node_id,
runtime_properties=None,
state=None,
version=None,
host_id=None,
relationships=None):
self.id = node_instance_id
self._node_id = node_id
self._runtime_properties = \
DirtyTrackingDict((runtime_properties or {}).copy())
self._state = state
self._version = version
self._host_id = host_id
self._relationships = relationships
def get(self, key):
return self._runtime_properties.get(key)
def put(self, key, value):
self._runtime_properties[key] = value
def delete(self, key):
del(self._runtime_properties[key])
__setitem__ = put
__getitem__ = get
__delitem__ = delete
def __contains__(self, key):
return key in self._runtime_properties
@property
def runtime_properties(self):
"""
The node instance runtime properties.
To update the properties, make changes on the returned dict and call
``update_node_instance`` with the modified instance.
"""
return self._runtime_properties
@property
def version(self):
return self._version
@property
def state(self):
"""
The node instance state.
To update the node instance state, change this property value and
call ``update_node_instance`` with the modified instance.
"""
return self._state
@state.setter
def state(self, value):
self._state = value
@property
def dirty(self):
return self._runtime_properties.dirty
@property
def host_id(self):
return self._host_id
@property
def node_id(self):
return self._node_id
@property
def relationships(self):
return self._relationships
def get_rest_client():
"""
:returns: A REST client configured to connect to the manager in context
:rtype: cloudify_rest_client.CloudifyClient
"""
return CloudifyClient(utils.get_manager_ip(),
utils.get_manager_rest_service_port())
def _save_resource(logger, resource, resource_path, target_path):
if not target_path:
target_path = os.path.join(utils.create_temp_folder(),
os.path.basename(resource_path))
with open(target_path, 'w') as f:
f.write(resource)
logger.info("Downloaded %s to %s" % (resource_path, target_path))
return target_path
def download_resource(resource_path, logger, target_path=None):
"""
Download resource from the manager file server.
:param resource_path: path to resource on the file server
:param logger: logger to use for info output
:param target_path: optional target path for the resource
:returns: path to the downloaded resource
"""
resource = get_resource(resource_path)
return _save_resource(logger, resource, resource_path, target_path)
def download_blueprint_resource(blueprint_id,
resource_path,
logger,
target_path=None):
"""
Download resource from the manager file server with path relative to
the blueprint denoted by ``blueprint_id``.
:param blueprint_id: the blueprint id of the blueprint to download the
resource from
:param resource_path: path to resource relative to blueprint folder
:param logger: logger to use for info output
:param target_path: optional target path for the resource
:returns: path to the downloaded resource
"""
resource = get_blueprint_resource(blueprint_id, resource_path)
return _save_resource(logger, resource, resource_path, target_path)
def get_resource(resource_path, base_url=None):
"""
Get resource from the manager file server.
:param resource_path: path to resource on the file server
:returns: resource content
"""
if base_url is None:
base_url = utils.get_manager_file_server_url()
try:
url = '{0}/{1}'.format(base_url, resource_path)
response = urllib2.urlopen(url)
return response.read()
except urllib2.HTTPError as e:
raise HttpException(e.url, e.code, e.msg)
def get_blueprint_resource(blueprint_id, resource_path):
"""
Get resource from the manager file server with path relative to
the blueprint denoted by ``blueprint_id``.
:param blueprint_id: the blueprint id of the blueprint to download
the resource from
:param resource_path: path to resource relative to blueprint folder
:returns: resource content
"""
base_url = "{0}/{1}".format(utils
.get_manager_file_server_blueprints_root_url(),
blueprint_id)
return get_resource(resource_path, base_url=base_url)
def get_node_instance(node_instance_id):
"""
Read node instance data from the storage.
:param node_instance_id: the node instance id
:rtype: NodeInstance
"""
client = get_rest_client()
instance = client.node_instances.get(node_instance_id)
return NodeInstance(node_instance_id,
instance.node_id,
runtime_properties=instance.runtime_properties,
state=instance.state,
version=instance.version,
host_id=instance.host_id,
relationships=instance.relationships)
def update_node_instance(node_instance):
"""
Update node instance data changes in the storage.
:param node_instance: the node instance with the updated data
"""
client = get_rest_client()
client.node_instances.update(
node_instance.id,
state=node_instance.state,
runtime_properties=node_instance.runtime_properties,
version=node_instance.version)
def get_node_instance_ip(node_instance_id):
"""
Get the IP address of the host the node instance denoted by
``node_instance_id`` is contained in.
"""
client = get_rest_client()
instance = client.node_instances.get(node_instance_id)
if instance.host_id is None:
raise NonRecoverableError('node instance: {0} is missing host_id '
'property'.format(instance.id))
if node_instance_id != instance.host_id:
instance = client.node_instances.get(instance.host_id)
if instance.runtime_properties.get('ip'):
return instance.runtime_properties['ip']
node = client.nodes.get(instance.deployment_id, instance.node_id)
if node.properties.get('ip'):
return node.properties['ip']
raise NonRecoverableError('could not find ip for node instance: {0} with '
'host id: {1}'.format(node_instance_id,
instance.id))
# TODO: some nasty code duplication between these two methods
def update_execution_status(execution_id, status, error=None):
"""
Update the execution status of the execution denoted by ``execution_id``.
:returns: The updated status
"""
client = get_rest_client()
return client.executions.update(execution_id, status, error)
def get_bootstrap_context():
"""Read the manager bootstrap context."""
client = get_rest_client()
context = client.manager.get_context()['context']
return context.get('cloudify', {})
def get_provider_context():
"""Read the manager provider context."""
client = get_rest_client()
context = client.manager.get_context()
return context['context']
class DirtyTrackingDict(dict):
def __init__(self, *args, **kwargs):
super(DirtyTrackingDict, self).__init__(*args, **kwargs)
self.modifiable = True
self.dirty = False
def __setitem__(self, key, value):
super(DirtyTrackingDict, self).__setitem__(key, value)
self._set_changed()
def __delitem__(self, key):
super(DirtyTrackingDict, self).__delitem__(key)
self._set_changed()
def update(self, E=None, **F):
# dict.update(None) raises a TypeError, so fall back to an empty mapping
super(DirtyTrackingDict, self).update(E if E is not None else {}, **F)
self._set_changed()
def clear(self):
super(DirtyTrackingDict, self).clear()
self._set_changed()
def pop(self, k, d=None):
# return the popped value like dict.pop does, while still flagging the change
result = super(DirtyTrackingDict, self).pop(k, d)
self._set_changed()
return result
def popitem(self):
result = super(DirtyTrackingDict, self).popitem()
self._set_changed()
return result
def _set_changed(self):
# python 2.6 doesn't have modifiable during copy.deepcopy
if hasattr(self, 'modifiable') and not self.modifiable:
raise NonRecoverableError('Cannot modify runtime properties of'
' relationship node instances')
self.dirty = True
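# Hedged usage sketch (not part of the original module): exercises NodeInstance
# and its DirtyTrackingDict of runtime properties locally, without a manager or
# REST call. It only runs where the module's own imports resolve, and only when
# the file is executed directly.
if __name__ == '__main__':
    instance = NodeInstance('node_abc123', 'vm',
                            runtime_properties={'ip': '10.0.0.1'})
    assert not instance.dirty                 # nothing has been modified yet
    instance['port'] = 8080                   # __setitem__ is aliased to put()
    assert instance.get('port') == 8080 and instance.dirty
    instance.runtime_properties.modifiable = False   # freeze the properties
    try:
        instance['port'] = 9090
    except NonRecoverableError:
        print('runtime properties are read-only once frozen')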
|
TheKK/Shedskin
|
refs/heads/master
|
examples/com/github/tarsa/tarsalzp/prelude/Long.py
|
6
|
#
# Copyright (c) 2012, Piotr Tarsa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'Piotr Tarsa'
class Long(object):
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
def shl8(self):
self.a = ((self.a & 0x00ff) << 8) + ((self.b & 0xff00) >> 8)
self.b = ((self.b & 0x00ff) << 8) + ((self.c & 0xff00) >> 8)
self.c = ((self.c & 0x00ff) << 8) + ((self.d & 0xff00) >> 8)
self.d = (self.d & 0x00ff) << 8
def shr8(self):
self.d = ((self.d & 0xff00) >> 8) + ((self.c & 0x00ff) << 8)
self.c = ((self.c & 0xff00) >> 8) + ((self.b & 0x00ff) << 8)
self.b = ((self.b & 0xff00) >> 8) + ((self.a & 0x00ff) << 8)
self.a = (self.a & 0xff00) >> 8
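# Hedged usage sketch (not part of the original file): Long packs a 64-bit
# value into four 16-bit words a..d, with a holding the most significant word.
# shl8()/shr8() shift the whole value by one byte. The helper below is for
# illustration only.
def _long_to_int(value):
    return (value.a << 48) | (value.b << 32) | (value.c << 16) | value.d
if __name__ == '__main__':
    v = Long(0x0012, 0x3456, 0x789a, 0xbcde)   # 0x00123456789abcde
    v.shl8()
    assert _long_to_int(v) == 0x123456789abcde00
    v.shr8()
    assert _long_to_int(v) == 0x00123456789abcde
    print('shl8/shr8 round-trip ok')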
|
jeremyh/agdc
|
refs/heads/master
|
src/abstract_ingester/tile_record.py
|
4
|
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
TileRecord: database interface class.
These classes provide an interface between the database and the top-level
ingest algorithm (AbstractIngester and its subclasses). They also provide
the implementation of the database and tile store side of the ingest
process. They are expected to be independent of the structure of any
particular dataset, but will change if the database schema or tile store
format changes.
"""
import logging
import os
from ingest_db_wrapper import IngestDBWrapper, TC_PENDING
from agdc.cube_util import get_file_size_mb
import re
import psycopg2
# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class TileRecord(object):
# pylint: disable=too-many-instance-attributes
"""TileRecord database interface class."""
TILE_METADATA_FIELDS = ['tile_id',
'x_index',
'y_index',
'tile_type_id',
'dataset_id',
'tile_pathname',
'tile_class_id',
'tile_size',
'ctime'
]
def __init__(self, collection, dataset_record, tile_contents):
self.collection = collection
self.datacube = collection.datacube
self.dataset_record = dataset_record
self.tile_contents = tile_contents
self.tile_footprint = tile_contents.tile_footprint
self.tile_type_id = tile_contents.tile_type_id
#Set tile_class_id to pending.
self.tile_class_id = TC_PENDING
#Set tile_id, determined below from database query
self.tile_id = None
self.db = IngestDBWrapper(self.datacube.db_connection)
# Fill a dictionary with data for the tile
tile_dict = {}
self.tile_dict = tile_dict
tile_dict['x_index'] = self.tile_footprint[0]
tile_dict['y_index'] = self.tile_footprint[1]
tile_dict['tile_type_id'] = self.tile_type_id
tile_dict['dataset_id'] = self.dataset_record.dataset_id
# Store final destination in the 'tile_pathname' field
tile_dict['tile_pathname'] = self.tile_contents.tile_output_path
tile_dict['tile_class_id'] = 1
# The physical file is currently in the temporary location
tile_dict['tile_size'] = \
get_file_size_mb(self.tile_contents
.temp_tile_output_path)
self.update_tile_footprint()
# Make the tile record entry on the database:
self.tile_id = self.db.get_tile_id(tile_dict)
if self.tile_id is None:
self.tile_id = self.db.insert_tile_record(tile_dict)
else:
# If there was any existing tile corresponding to tile_dict then
# it should already have been removed.
raise AssertionError("Attempt to recreate an existing tile.")
tile_dict['tile_id'] = self.tile_id
def update_tile_footprint(self):
"""Update the tile footprint entry in the database"""
if not self.db.tile_footprint_exists(self.tile_dict):
# We may need to create a new footprint record.
footprint_dict = {'x_index': self.tile_footprint[0],
'y_index': self.tile_footprint[1],
'tile_type_id': self.tile_type_id,
'x_min': self.tile_contents.tile_extents[0],
'y_min': self.tile_contents.tile_extents[1],
'x_max': self.tile_contents.tile_extents[2],
'y_max': self.tile_contents.tile_extents[3],
'bbox': 'Populate this within sql query?'}
# Create an independent database connection for this transaction.
my_db = IngestDBWrapper(self.datacube.create_connection())
try:
with self.collection.transaction(my_db):
if not my_db.tile_footprint_exists(self.tile_dict):
my_db.insert_tile_footprint(footprint_dict)
except psycopg2.IntegrityError:
# If we get an IntegrityError we assume the tile_footprint
# is already in the database, and we do not need to add it.
pass
finally:
my_db.close()
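# Hedged illustration (not part of the original module): the concurrency
# pattern used by update_tile_footprint above: attempt the insert and treat a
# uniqueness violation as a sign that another worker already created the row.
# Shown here with the stdlib sqlite3 module instead of psycopg2 so the idiom
# can be tried without the ingester stack.
if __name__ == '__main__':
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE footprint (x INTEGER, y INTEGER, '
                 'PRIMARY KEY (x, y))')
    for attempt in range(2):   # the second attempt simulates a racing worker
        try:
            with conn:
                conn.execute('INSERT INTO footprint (x, y) VALUES (?, ?)',
                             (10, -35))
        except sqlite3.IntegrityError:
            print('footprint already present; nothing to do')
    conn.close()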
|
fvcproductions/dotfiles
|
refs/heads/master
|
bin/sketch/Plugins/WakaTime.sketchplugin/Contents/Resources/wakatime/packages/pygments/styles/rrt.py
|
31
|
# -*- coding: utf-8 -*-
"""
pygments.styles.rrt
~~~~~~~~~~~~~~~~~~~
pygments "rrt" theme, based on Zap and Emacs defaults.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
"""
Minimalistic "rrt" theme, based on Zap and Emacs defaults.
"""
background_color = '#000000'
highlight_color = '#0000ff'
styles = {
Comment: '#00ff00',
Name.Function: '#ffff00',
Name.Variable: '#eedd82',
Name.Constant: '#7fffd4',
Keyword: '#ff0000',
Comment.Preproc: '#e5e5e5',
String: '#87ceeb',
Keyword.Type: '#ee82ee',
}
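# Hedged usage sketch (not part of the original module): render a snippet with
# this style through the standard pygments API. Only runs when executed
# directly.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import Terminal256Formatter
    print(highlight('print("hello")', PythonLexer(),
                    Terminal256Formatter(style=RrtStyle)))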
|
neumerance/cloudloon2
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/django/contrib/localflavor/in_/in_states.py
|
197
|
"""
A mapping of state misspellings/abbreviations to normalized abbreviations, and
a list of states (with union territories grouped at the end) for use as
`choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
STATE_CHOICES = (
('KA', 'Karnataka'),
('AP', 'Andhra Pradesh'),
('KL', 'Kerala'),
('TN', 'Tamil Nadu'),
('MH', 'Maharashtra'),
('UP', 'Uttar Pradesh'),
('GA', 'Goa'),
('GJ', 'Gujarat'),
('RJ', 'Rajasthan'),
('HP', 'Himachal Pradesh'),
('JK', 'Jammu and Kashmir'),
('AR', 'Arunachal Pradesh'),
('AS', 'Assam'),
('BR', 'Bihar'),
('CG', 'Chattisgarh'),
('HR', 'Haryana'),
('JH', 'Jharkhand'),
('MP', 'Madhya Pradesh'),
('MN', 'Manipur'),
('ML', 'Meghalaya'),
('MZ', 'Mizoram'),
('NL', 'Nagaland'),
('OR', 'Orissa'),
('PB', 'Punjab'),
('SK', 'Sikkim'),
('TR', 'Tripura'),
('UA', 'Uttarakhand'),
('WB', 'West Bengal'),
# Union Territories
('AN', 'Andaman and Nicobar'),
('CH', 'Chandigarh'),
('DN', 'Dadra and Nagar Haveli'),
('DD', 'Daman and Diu'),
('DL', 'Delhi'),
('LD', 'Lakshadweep'),
('PY', 'Pondicherry'),
)
STATES_NORMALIZED = {
'an': 'AN',
'andaman and nicobar': 'AN',
'andra pradesh': 'AP',
'andrapradesh': 'AP',
'andhrapradesh': 'AP',
'ap': 'AP',
'andhra pradesh': 'AP',
'ar': 'AR',
'arunachal pradesh': 'AR',
'assam': 'AS',
'as': 'AS',
'bihar': 'BR',
'br': 'BR',
'cg': 'CG',
'chattisgarh': 'CG',
'ch': 'CH',
'chandigarh': 'CH',
'daman and diu': 'DD',
'dd': 'DD',
'dl': 'DL',
'delhi': 'DL',
'dn': 'DN',
'dadra and nagar haveli': 'DN',
'ga': 'GA',
'goa': 'GA',
'gj': 'GJ',
'gujarat': 'GJ',
'himachal pradesh': 'HP',
'hp': 'HP',
'hr': 'HR',
'haryana': 'HR',
'jharkhand': 'JH',
'jh': 'JH',
'jammu and kashmir': 'JK',
'jk': 'JK',
'karnataka': 'KA',
'karnatka': 'KA',
'ka': 'KA',
'kerala': 'KL',
'kl': 'KL',
'ld': 'LD',
'lakshadweep': 'LD',
'maharastra': 'MH',
'mh': 'MH',
'maharashtra': 'MH',
'meghalaya': 'ML',
'ml': 'ML',
'mn': 'MN',
'manipur': 'MN',
'madhya pradesh': 'MP',
'mp': 'MP',
'mizoram': 'MZ',
'mizo': 'MZ',
'mz': 'MZ',
'nl': 'NL',
'nagaland': 'NL',
'orissa': 'OR',
'odisa': 'OR',
'orisa': 'OR',
'or': 'OR',
'pb': 'PB',
'punjab': 'PB',
'py': 'PY',
'pondicherry': 'PY',
'rajasthan': 'RJ',
'rajastan': 'RJ',
'rj': 'RJ',
'sikkim': 'SK',
'sk': 'SK',
'tamil nadu': 'TN',
'tn': 'TN',
'tamilnadu': 'TN',
'tamilnad': 'TN',
'tr': 'TR',
'tripura': 'TR',
'ua': 'UA',
'uttarakhand': 'UA',
'up': 'UP',
'uttar pradesh': 'UP',
'westbengal': 'WB',
'bengal': 'WB',
'wb': 'WB',
'west bengal': 'WB'
}
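# Hedged usage sketch (not part of the original module): how these structures
# are typically combined: normalize free-text input to an abbreviation with
# STATES_NORMALIZED, then validate it against STATE_CHOICES.
if __name__ == '__main__':
    raw = ' Tamilnadu '
    abbr = STATES_NORMALIZED.get(raw.strip().lower())
    assert abbr == 'TN' and abbr in dict(STATE_CHOICES)
    print('%s -> %s (%s)' % (raw.strip(), abbr, dict(STATE_CHOICES)[abbr]))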
|
utopiaprince/micropython
|
refs/heads/master
|
tests/float/string_format.py
|
21
|
# Change the following to True to get a much more comprehensive set of tests
# to run, albeit, which take considerably longer.
full_tests = False
def test(fmt, *args):
print('{:8s}'.format(fmt) + '>' + fmt.format(*args) + '<')
test("{:10.4}", 123.456)
test("{:10.4e}", 123.456)
test("{:10.4e}", -123.456)
test("{:10.4f}", 123.456)
test("{:10.4f}", -123.456)
test("{:10.4g}", 123.456)
test("{:10.4g}", -123.456)
test("{:10.4n}", 123.456)
test("{:e}", 100)
test("{:f}", 200)
test("{:g}", 300)
test("{:10.4E}", 123.456)
test("{:10.4E}", -123.456)
test("{:10.4F}", 123.456)
test("{:10.4F}", -123.456)
test("{:10.4G}", 123.456)
test("{:10.4G}", -123.456)
test("{:06e}", float("inf"))
test("{:06e}", float("-inf"))
test("{:06e}", float("nan"))
# The following fails right now
#test("{:10.1}", 0.0)
def test_fmt(conv, fill, alignment, sign, prefix, width, precision, type, arg):
fmt = '{'
if conv:
fmt += '!'
fmt += conv
fmt += ':'
if alignment:
fmt += fill
fmt += alignment
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
if fill == '0' and alignment == '=':
fmt = '{:'
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
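# Worked example (not part of the original test): test_fmt('', '0', '=', '+',
# '', '10', '2', 'f', 1.5) assembles the format string '{:0=+10.2f}'. Because
# fill is '0' and alignment is '=', it additionally formats the same value
# with the fill and alignment dropped ('{:+10.2f}'), so the same value is also
# checked without an explicit fill or alignment.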
eg_nums = (0.0, -0.0, 0.1, 1.234, 12.3459, 1.23456789, 123456789.0, -0.0,
-0.1, -1.234, -12.3459, 1e4, 1e-4, 1e5, 1e-5, 1e6, 1e-6, 1e10,
1e37, -1e37, 1e-37, -1e-37,
1.23456e8, 1.23456e7, 1.23456e6, 1.23456e5, 1.23456e4, 1.23456e3, 1.23456e2, 1.23456e1, 1.23456e0,
1.23456e-1, 1.23456e-2, 1.23456e-3, 1.23456e-4, 1.23456e-5, 1.23456e-6, 1.23456e-7, 1.23456e-8,
-1.23456e8, -1.23456e7, -1.23456e6, -1.23456e5, -1.23456e4, -1.23456e3, -1.23456e2, -1.23456e1, -1.23456e0,
-1.23456e-1, -1.23456e-2, -1.23456e-3, -1.23456e-4, -1.23456e-5, -1.23456e-6, -1.23456e-7, -1.23456e-8)
if full_tests:
for type in ('e', 'E', 'g', 'G', 'n'):
for width in ('', '4', '6', '8', '10'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', '@', '0', ' '):
for sign in ('', '+', '-', ' '):
for prec in ('', '1', '3', '6'):
for num in eg_nums:
test_fmt('', fill, alignment, sign, '', width, prec, type, num)
# Note: We use 1.23459 rather than 1.2345 because '{:3f}'.format(1.2345)
# rounds differently than print("%.3f", 1.2345);
f_nums = (0.0, -0.0, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0,
0.0012, 0.0123, 0.1234, 1.23459, 12.3456,
-0.0001, -0.001, -0.01, -0.1, -1.0, -10.0,
           -0.0012, -0.0123, -0.1234, -1.23459, -12.3456)
# int_nums2 is used by the full_tests loop below; a small, assumed set of
# integer values is defined here so that path does not fail with a NameError.
int_nums2 = (-12, -1, 0, 1, 12, True, False)
if full_tests:
for type in ('f', 'F'):
for width in ('', '4', '6', '8', '10'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', ' ', '0', '@'):
for sign in ('', '+', '-', ' '):
# An empty precision defaults to 6, but when uPy is
# configured to use a float, we can only use a
# precision of 6 with numbers less than 10 and still
# get results that compare to CPython (which uses
# long doubles).
for prec in ('1', '2', '3'):
for num in f_nums:
test_fmt('', fill, alignment, sign, '', width, prec, type, num)
for num in int_nums2:
test_fmt('', fill, alignment, sign, '', width, '', type, num)
pct_nums1 = (0.1, 0.58, 0.99, -0.1, -0.58, -0.99)
pct_nums2 = (True, False, 1, 0, -1)
if full_tests:
type = '%'
for width in ('', '4', '6', '8', '10'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', ' ', '0', '@'):
for sign in ('', '+', '-', ' '):
# An empty precision defaults to 6, but when uPy is
# configured to use a float, we can only use a
# precision of 6 with numbers less than 10 and still
# get results that compare to CPython (which uses
# long doubles).
for prec in ('1', '2', '3'):
for num in pct_nums1:
test_fmt('', fill, alignment, sign, '', width, prec, type, num)
for num in pct_nums2:
test_fmt('', fill, alignment, sign, '', width, '', type, num)
else:
for num in pct_nums1:
test_fmt('', '', '', '', '', '', '1', '%', num)
# We don't currently test a type of '' with floats (see the detailed comment
# in objstr.c)
# tests for errors in format string
try:
'{:10.1b}'.format(0.0)
except ValueError:
print('ValueError')
|
owlzhou/ttornado
|
refs/heads/master
|
env/Lib/site-packages/pip/_vendor/html5lib/trie/__init__.py
|
1735
|
from __future__ import absolute_import, division, unicode_literals
from .py import Trie as PyTrie
Trie = PyTrie
try:
from .datrie import Trie as DATrie
except ImportError:
pass
else:
Trie = DATrie
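# Note (not part of the original file): the try/except above makes Trie point
# at the C-accelerated datrie backend when the optional `datrie` package is
# importable, and silently falls back to the pure-Python implementation
# otherwise, so importers always get the fastest backend available.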
|
suxinde2009/thefuck
|
refs/heads/master
|
tests/rules/test_django_south_merge.py
|
20
|
import pytest
from thefuck.rules.django_south_merge import match, get_new_command
from tests.utils import Command
@pytest.fixture
def stderr():
return '''Running migrations for app:
! Migration app:0003_auto... should not have been applied before app:0002_auto__add_field_query_due_date_ but was.
Traceback (most recent call last):
File "/home/nvbn/work/.../bin/python", line 42, in <module>
exec(compile(__file__f.read(), __file__, "exec"))
File "/home/nvbn/work/.../app/manage.py", line 34, in <module>
execute_from_command_line(sys.argv)
File "/home/nvbn/work/.../lib/django/core/management/__init__.py", line 443, in execute_from_command_line
utility.execute()
File "/home/nvbn/work/.../lib/django/core/management/__init__.py", line 382, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/nvbn/work/.../lib/django/core/management/base.py", line 196, in run_from_argv
self.execute(*args, **options.__dict__)
File "/home/nvbn/work/.../lib/django/core/management/base.py", line 232, in execute
output = self.handle(*args, **options)
File "/home/nvbn/work/.../app/lib/south/management/commands/migrate.py", line 108, in handle
ignore_ghosts = ignore_ghosts,
File "/home/nvbn/work/.../app/lib/south/migration/__init__.py", line 207, in migrate_app
raise exceptions.InconsistentMigrationHistory(problems)
south.exceptions.InconsistentMigrationHistory: Inconsistent migration history
The following options are available:
--merge: will just attempt the migration ignoring any potential dependency conflicts.
'''
def test_match(stderr):
assert match(Command('./manage.py migrate', stderr=stderr), None)
assert match(Command('python manage.py migrate', stderr=stderr), None)
assert not match(Command('./manage.py migrate'), None)
assert not match(Command('app migrate', stderr=stderr), None)
assert not match(Command('./manage.py test', stderr=stderr), None)
def test_get_new_command():
assert get_new_command(Command('./manage.py migrate auth'), None) \
== './manage.py migrate auth --merge'
|
kelvin13/shifty-octocat
|
refs/heads/master
|
pygments/lexers/_cocoa_builtins.py
|
25
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._cocoa_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file defines a set of types used across Cocoa frameworks from Apple.
There is a list of @interfaces, @protocols and some other (structs, unions)
File may be also used as standalone generator for aboves.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
COCOA_INTERFACES = set(['UITableViewCell', 'HKCorrelationQuery', 'NSURLSessionDataTask', 'PHFetchOptions', 'NSLinguisticTagger', 'NSStream', 'AVAudioUnitDelay', 'GCMotion', 'SKPhysicsWorld', 'NSString', 'CMAttitude', 'AVAudioEnvironmentDistanceAttenuationParameters', 'HKStatisticsCollection', 'SCNPlane', 'CBPeer', 'JSContext', 'SCNTransaction', 'SCNTorus', 'AVAudioUnitEffect', 'UICollectionReusableView', 'MTLSamplerDescriptor', 'AVAssetReaderSampleReferenceOutput', 'AVMutableCompositionTrack', 'GKLeaderboard', 'NSFetchedResultsController', 'SKRange', 'MKTileOverlayRenderer', 'MIDINetworkSession', 'UIVisualEffectView', 'CIWarpKernel', 'PKObject', 'MKRoute', 'MPVolumeView', 'UIPrintInfo', 'SCNText', 'ADClient', 'PKPayment', 'AVMutableAudioMix', 'GLKEffectPropertyLight', 'WKScriptMessage', 'AVMIDIPlayer', 'PHCollectionListChangeRequest', 'UICollectionViewLayout', 'NSMutableCharacterSet', 'SKPaymentTransaction', 'NEOnDemandRuleConnect', 'NSShadow', 'SCNView', 'NSURLSessionConfiguration', 'MTLVertexAttributeDescriptor', 'CBCharacteristic', 'HKQuantityType', 'CKLocationSortDescriptor', 'NEVPNIKEv2SecurityAssociationParameters', 'CMStepCounter', 'NSNetService', 'AVAssetWriterInputMetadataAdaptor', 'UICollectionView', 'UIViewPrintFormatter', 'SCNLevelOfDetail', 'CAShapeLayer', 'MCPeerID', 'MPRatingCommand', 'WKNavigation', 'NSDictionary', 'NSFileVersion', 'CMGyroData', 'AVAudioUnitDistortion', 'CKFetchRecordsOperation', 'SKPhysicsJointSpring', 'SCNHitTestResult', 'AVAudioTime', 'CIFilter', 'UIView', 'SCNConstraint', 'CAPropertyAnimation', 'MKMapItem', 'MPRemoteCommandCenter', 'PKPaymentSummaryItem', 'UICollectionViewFlowLayoutInvalidationContext', 'UIInputViewController', 'PKPass', 'SCNPhysicsBehavior', 'MTLRenderPassColorAttachmentDescriptor', 'MKPolygonRenderer', 'CKNotification', 'JSValue', 'PHCollectionList', 'CLGeocoder', 'NSByteCountFormatter', 'AVCaptureScreenInput', 'MPFeedbackCommand', 'CAAnimation', 'MKOverlayPathView', 'UIActionSheet', 'UIMotionEffectGroup', 'NSLengthFormatter', 'UIBarItem', 'SKProduct', 'AVAssetExportSession', 'NSKeyedUnarchiver', 'NSMutableSet', 'SCNPyramid', 'PHAssetCollection', 'MKMapView', 'HMHomeManager', 'CATransition', 'MTLCompileOptions', 'UIVibrancyEffect', 'CLCircularRegion', 'MKTileOverlay', 'SCNShape', 'ACAccountCredential', 'SKPhysicsJointLimit', 'MKMapSnapshotter', 'AVMediaSelectionGroup', 'NSIndexSet', 'CBPeripheralManager', 'CKRecordZone', 'AVAudioRecorder', 'NSURL', 'CBCentral', 'NSNumber', 'AVAudioOutputNode', 'MTLVertexAttributeDescriptorArray', 'MKETAResponse', 'SKTransition', 'SSReadingList', 'HKSourceQuery', 'UITableViewRowAction', 'UITableView', 'SCNParticlePropertyController', 'AVCaptureStillImageOutput', 'GCController', 'AVAudioPlayerNode', 'AVAudioSessionPortDescription', 'NSHTTPURLResponse', 'NEOnDemandRuleEvaluateConnection', 'SKEffectNode', 'HKQuantity', 'GCControllerElement', 'AVPlayerItemAccessLogEvent', 'SCNBox', 'NSExtensionContext', 'MKOverlayRenderer', 'SCNPhysicsVehicle', 'NSDecimalNumber', 'EKReminder', 'MKPolylineView', 'CKQuery', 'AVAudioMixerNode', 'GKAchievementDescription', 'EKParticipant', 'NSBlockOperation', 'UIActivityItemProvider', 'CLLocation', 'NSBatchUpdateRequest', 'PHContentEditingOutput', 'PHObjectChangeDetails', 'HKWorkoutType', 'MPMoviePlayerController', 'AVAudioFormat', 'HMTrigger', 'MTLRenderPassDepthAttachmentDescriptor', 'SCNRenderer', 'GKScore', 'UISplitViewController', 'HKSource', 'NSURLConnection', 'ABUnknownPersonViewController', 'SCNTechnique', 'UIMenuController', 'NSEvent', 'SKTextureAtlas', 
'NSKeyedArchiver', 'GKLeaderboardSet', 'NSSimpleCString', 'AVAudioPCMBuffer', 'CBATTRequest', 'GKMatchRequest', 'AVMetadataObject', 'SKProductsRequest', 'UIAlertView', 'NSIncrementalStore', 'MFMailComposeViewController', 'SCNFloor', 'NSSortDescriptor', 'CKFetchNotificationChangesOperation', 'MPMovieAccessLog', 'NSManagedObjectContext', 'AVAudioUnitGenerator', 'WKBackForwardList', 'SKMutableTexture', 'AVCaptureAudioDataOutput', 'ACAccount', 'AVMetadataItem', 'MPRatingCommandEvent', 'AVCaptureDeviceInputSource', 'CLLocationManager', 'MPRemoteCommand', 'AVCaptureSession', 'UIStepper', 'UIRefreshControl', 'NEEvaluateConnectionRule', 'CKModifyRecordsOperation', 'UICollectionViewTransitionLayout', 'CBCentralManager', 'NSPurgeableData', 'PKShippingMethod', 'SLComposeViewController', 'NSHashTable', 'MKUserTrackingBarButtonItem', 'UILexiconEntry', 'CMMotionActivity', 'SKAction', 'SKShader', 'AVPlayerItemOutput', 'MTLRenderPassAttachmentDescriptor', 'UIDocumentInteractionController', 'UIDynamicItemBehavior', 'NSMutableDictionary', 'UILabel', 'AVCaptureInputPort', 'NSExpression', 'CAInterAppAudioTransportView', 'SKMutablePayment', 'UIImage', 'PHCachingImageManager', 'SCNTransformConstraint', 'HKCorrelationType', 'UIColor', 'SCNGeometrySource', 'AVCaptureAutoExposureBracketedStillImageSettings', 'UIPopoverBackgroundView', 'UIToolbar', 'NSNotificationCenter', 'UICollectionViewLayoutAttributes', 'AVAssetReaderOutputMetadataAdaptor', 'NSEntityMigrationPolicy', 'HMUser', 'NSLocale', 'NSURLSession', 'SCNCamera', 'NSTimeZone', 'UIManagedDocument', 'AVMutableVideoCompositionLayerInstruction', 'AVAssetTrackGroup', 'NSInvocationOperation', 'ALAssetRepresentation', 'AVQueuePlayer', 'HMServiceGroup', 'UIPasteboard', 'PHContentEditingInput', 'NSLayoutManager', 'EKCalendarChooser', 'EKObject', 'CATiledLayer', 'GLKReflectionMapEffect', 'NSManagedObjectID', 'NSEnergyFormatter', 'SLRequest', 'HMCharacteristic', 'AVPlayerLayer', 'MTLRenderPassDescriptor', 'SKPayment', 'NSPointerArray', 'AVAudioMix', 'SCNLight', 'MCAdvertiserAssistant', 'MKMapSnapshotOptions', 'HKCategorySample', 'AVAudioEnvironmentReverbParameters', 'SCNMorpher', 'AVTimedMetadataGroup', 'CBMutableCharacteristic', 'NSFetchRequest', 'UIDevice', 'NSManagedObject', 'NKAssetDownload', 'AVOutputSettingsAssistant', 'SKPhysicsJointPin', 'UITabBar', 'UITextInputMode', 'NSFetchRequestExpression', 'HMActionSet', 'CTSubscriber', 'PHAssetChangeRequest', 'NSPersistentStoreRequest', 'UITabBarController', 'HKQuantitySample', 'AVPlayerItem', 'AVSynchronizedLayer', 'MKDirectionsRequest', 'NSMetadataItem', 'UIPresentationController', 'UINavigationItem', 'PHFetchResultChangeDetails', 'PHImageManager', 'AVCaptureManualExposureBracketedStillImageSettings', 'UIStoryboardPopoverSegue', 'SCNLookAtConstraint', 'UIGravityBehavior', 'UIWindow', 'CBMutableDescriptor', 'NEOnDemandRuleDisconnect', 'UIBezierPath', 'UINavigationController', 'ABPeoplePickerNavigationController', 'EKSource', 'AVAssetWriterInput', 'AVPlayerItemTrack', 'GLKEffectPropertyTexture', 'NSHTTPCookie', 'NSURLResponse', 'SKPaymentQueue', 'NSAssertionHandler', 'MKReverseGeocoder', 'GCControllerAxisInput', 'NSArray', 'NSOrthography', 'NSURLSessionUploadTask', 'NSCharacterSet', 'AVMutableVideoCompositionInstruction', 'AVAssetReaderOutput', 'EAGLContext', 'WKFrameInfo', 'CMPedometer', 'MyClass', 'CKModifyBadgeOperation', 'AVCaptureAudioFileOutput', 'SKEmitterNode', 'NSMachPort', 'AVVideoCompositionCoreAnimationTool', 'PHCollection', 'SCNPhysicsWorld', 'NSURLRequest', 'CMAccelerometerData', 'NSNetServiceBrowser', 
'CLFloor', 'AVAsynchronousVideoCompositionRequest', 'SCNGeometry', 'SCNIKConstraint', 'CIKernel', 'CAGradientLayer', 'HKCharacteristicType', 'NSFormatter', 'SCNAction', 'CATransaction', 'CBUUID', 'UIStoryboard', 'MPMediaLibrary', 'UITapGestureRecognizer', 'MPMediaItemArtwork', 'NSURLSessionTask', 'AVAudioUnit', 'MCBrowserViewController', 'UIFontDescriptor', 'NSRelationshipDescription', 'HKSample', 'WKWebView', 'NSMutableAttributedString', 'NSPersistentStoreAsynchronousResult', 'MPNowPlayingInfoCenter', 'MKLocalSearch', 'EAAccessory', 'HKCorrelation', 'CATextLayer', 'NSNotificationQueue', 'UINib', 'GLKTextureLoader', 'HKObjectType', 'NSValue', 'NSMutableIndexSet', 'SKPhysicsContact', 'NSProgress', 'AVPlayerViewController', 'CAScrollLayer', 'GKSavedGame', 'NSTextCheckingResult', 'PHObjectPlaceholder', 'SKConstraint', 'EKEventEditViewController', 'NSEntityDescription', 'NSURLCredentialStorage', 'UIApplication', 'SKDownload', 'SCNNode', 'MKLocalSearchRequest', 'SKScene', 'UISearchDisplayController', 'NEOnDemandRule', 'MTLRenderPassStencilAttachmentDescriptor', 'CAReplicatorLayer', 'UIPrintPageRenderer', 'EKCalendarItem', 'NSUUID', 'EAAccessoryManager', 'NEOnDemandRuleIgnore', 'SKRegion', 'AVAssetResourceLoader', 'EAWiFiUnconfiguredAccessoryBrowser', 'NSUserActivity', 'CTCall', 'UIPrinterPickerController', 'CIVector', 'UINavigationBar', 'UIPanGestureRecognizer', 'MPMediaQuery', 'ABNewPersonViewController', 'CKRecordZoneID', 'HKAnchoredObjectQuery', 'CKFetchRecordZonesOperation', 'UIStoryboardSegue', 'ACAccountType', 'GKSession', 'SKVideoNode', 'PHChange', 'SKReceiptRefreshRequest', 'GCExtendedGamepadSnapshot', 'MPSeekCommandEvent', 'GCExtendedGamepad', 'CAValueFunction', 'SCNCylinder', 'NSNotification', 'NSBatchUpdateResult', 'PKPushCredentials', 'SCNPhysicsSliderJoint', 'AVCaptureDeviceFormat', 'AVPlayerItemErrorLog', 'NSMapTable', 'NSSet', 'CMMotionManager', 'GKVoiceChatService', 'UIPageControl', 'UILexicon', 'MTLArrayType', 'AVAudioUnitReverb', 'MKGeodesicPolyline', 'AVMutableComposition', 'NSLayoutConstraint', 'UIPrinter', 'NSOrderedSet', 'CBAttribute', 'PKPushPayload', 'NSIncrementalStoreNode', 'EKEventStore', 'MPRemoteCommandEvent', 'UISlider', 'UIBlurEffect', 'CKAsset', 'AVCaptureInput', 'AVAudioEngine', 'MTLVertexDescriptor', 'SKPhysicsBody', 'NSOperation', 'PKPaymentPass', 'UIImageAsset', 'MKMapCamera', 'SKProductsResponse', 'GLKEffectPropertyMaterial', 'AVCaptureDevice', 'CTCallCenter', 'CABTMIDILocalPeripheralViewController', 'NEVPNManager', 'HKQuery', 'SCNPhysicsContact', 'CBMutableService', 'AVSampleBufferDisplayLayer', 'SCNSceneSource', 'SKLightNode', 'CKDiscoveredUserInfo', 'NSMutableArray', 'MTLDepthStencilDescriptor', 'MTLArgument', 'NSMassFormatter', 'CIRectangleFeature', 'PKPushRegistry', 'NEVPNConnection', 'MCNearbyServiceBrowser', 'NSOperationQueue', 'MKPolylineRenderer', 'HKWorkout', 'NSValueTransformer', 'UICollectionViewFlowLayout', 'MPChangePlaybackRateCommandEvent', 'NSEntityMapping', 'SKTexture', 'NSMergePolicy', 'UITextInputStringTokenizer', 'NSRecursiveLock', 'AVAsset', 'NSUndoManager', 'AVAudioUnitSampler', 'NSItemProvider', 'SKUniform', 'MPMediaPickerController', 'CKOperation', 'MTLRenderPipelineDescriptor', 'EAWiFiUnconfiguredAccessory', 'NSFileCoordinator', 'SKRequest', 'NSFileHandle', 'NSConditionLock', 'UISegmentedControl', 'NSManagedObjectModel', 'UITabBarItem', 'SCNCone', 'MPMediaItem', 'SCNMaterial', 'EKRecurrenceRule', 'UIEvent', 'UITouch', 'UIPrintInteractionController', 'CMDeviceMotion', 'NEVPNProtocol', 'NSCompoundPredicate', 'HKHealthStore', 
'MKMultiPoint', 'HKSampleType', 'UIPrintFormatter', 'AVAudioUnitEQFilterParameters', 'SKView', 'NSConstantString', 'UIPopoverController', 'CKDatabase', 'AVMetadataFaceObject', 'UIAccelerometer', 'EKEventViewController', 'CMAltitudeData', 'MTLStencilDescriptor', 'UISwipeGestureRecognizer', 'NSPort', 'MKCircleRenderer', 'AVCompositionTrack', 'NSAsynchronousFetchRequest', 'NSUbiquitousKeyValueStore', 'NSMetadataQueryResultGroup', 'AVAssetResourceLoadingDataRequest', 'UITableViewHeaderFooterView', 'CKNotificationID', 'AVAudioSession', 'HKUnit', 'NSNull', 'NSPersistentStoreResult', 'MKCircleView', 'AVAudioChannelLayout', 'NEVPNProtocolIKEv2', 'WKProcessPool', 'UIAttachmentBehavior', 'CLBeacon', 'NSInputStream', 'NSURLCache', 'GKPlayer', 'NSMappingModel', 'CIQRCodeFeature', 'AVMutableVideoComposition', 'PHFetchResult', 'NSAttributeDescription', 'AVPlayer', 'MKAnnotationView', 'PKPaymentRequest', 'NSTimer', 'CBDescriptor', 'MKOverlayView', 'AVAudioUnitTimePitch', 'NSSaveChangesRequest', 'UIReferenceLibraryViewController', 'SKPhysicsJointFixed', 'UILocalizedIndexedCollation', 'UIInterpolatingMotionEffect', 'UIDocumentPickerViewController', 'AVAssetWriter', 'NSBundle', 'SKStoreProductViewController', 'GLKViewController', 'NSMetadataQueryAttributeValueTuple', 'GKTurnBasedMatch', 'AVAudioFile', 'UIActivity', 'NSPipe', 'MKShape', 'NSMergeConflict', 'CIImage', 'HKObject', 'UIRotationGestureRecognizer', 'AVPlayerItemLegibleOutput', 'AVAssetImageGenerator', 'GCControllerButtonInput', 'CKMarkNotificationsReadOperation', 'CKSubscription', 'MPTimedMetadata', 'NKIssue', 'UIScreenMode', 'HMAccessoryBrowser', 'GKTurnBasedEventHandler', 'UIWebView', 'MKPolyline', 'JSVirtualMachine', 'AVAssetReader', 'NSAttributedString', 'GKMatchmakerViewController', 'NSCountedSet', 'UIButton', 'WKNavigationResponse', 'GKLocalPlayer', 'MPMovieErrorLog', 'AVSpeechUtterance', 'HKStatistics', 'UILocalNotification', 'HKBiologicalSexObject', 'AVURLAsset', 'CBPeripheral', 'NSDateComponentsFormatter', 'SKSpriteNode', 'UIAccessibilityElement', 'AVAssetWriterInputGroup', 'HMZone', 'AVAssetReaderAudioMixOutput', 'NSEnumerator', 'UIDocument', 'MKLocalSearchResponse', 'UISimpleTextPrintFormatter', 'PHPhotoLibrary', 'CBService', 'UIDocumentMenuViewController', 'MCSession', 'QLPreviewController', 'CAMediaTimingFunction', 'UITextPosition', 'ASIdentifierManager', 'AVAssetResourceLoadingRequest', 'SLComposeServiceViewController', 'UIPinchGestureRecognizer', 'PHObject', 'NSExtensionItem', 'HKSampleQuery', 'MTLRenderPipelineColorAttachmentDescriptorArray', 'MKRouteStep', 'SCNCapsule', 'NSMetadataQuery', 'AVAssetResourceLoadingContentInformationRequest', 'UITraitCollection', 'CTCarrier', 'NSFileSecurity', 'UIAcceleration', 'UIMotionEffect', 'MTLRenderPipelineReflection', 'CLHeading', 'CLVisit', 'MKDirectionsResponse', 'HMAccessory', 'MTLStructType', 'UITextView', 'CMMagnetometerData', 'UICollisionBehavior', 'UIProgressView', 'CKServerChangeToken', 'UISearchBar', 'MKPlacemark', 'AVCaptureConnection', 'NSPropertyMapping', 'ALAssetsFilter', 'SK3DNode', 'AVPlayerItemErrorLogEvent', 'NSJSONSerialization', 'AVAssetReaderVideoCompositionOutput', 'ABPersonViewController', 'CIDetector', 'GKTurnBasedMatchmakerViewController', 'MPMediaItemCollection', 'SCNSphere', 'NSCondition', 'NSURLCredential', 'MIDINetworkConnection', 'NSFileProviderExtension', 'NSDecimalNumberHandler', 'NSAtomicStoreCacheNode', 'NSAtomicStore', 'EKAlarm', 'CKNotificationInfo', 'AVAudioUnitEQ', 'UIPercentDrivenInteractiveTransition', 'MKPolygon', 'AVAssetTrackSegment', 
'MTLVertexAttribute', 'NSExpressionDescription', 'HKStatisticsCollectionQuery', 'NSURLAuthenticationChallenge', 'NSDirectoryEnumerator', 'MKDistanceFormatter', 'UIAlertAction', 'NSPropertyListSerialization', 'GKPeerPickerController', 'UIUserNotificationSettings', 'UITableViewController', 'GKNotificationBanner', 'MKPointAnnotation', 'MTLRenderPassColorAttachmentDescriptorArray', 'NSCache', 'SKPhysicsJoint', 'NSXMLParser', 'UIViewController', 'PKPaymentToken', 'MFMessageComposeViewController', 'AVAudioInputNode', 'NSDataDetector', 'CABTMIDICentralViewController', 'AVAudioUnitMIDIInstrument', 'AVCaptureVideoPreviewLayer', 'AVAssetWriterInputPassDescription', 'MPChangePlaybackRateCommand', 'NSURLComponents', 'CAMetalLayer', 'UISnapBehavior', 'AVMetadataMachineReadableCodeObject', 'CKDiscoverUserInfosOperation', 'NSTextAttachment', 'NSException', 'UIMenuItem', 'CMMotionActivityManager', 'SCNGeometryElement', 'NCWidgetController', 'CAEmitterLayer', 'MKUserLocation', 'UIImagePickerController', 'CIFeature', 'AVCaptureDeviceInput', 'ALAsset', 'NSURLSessionDownloadTask', 'SCNPhysicsHingeJoint', 'MPMoviePlayerViewController', 'NSMutableOrderedSet', 'SCNMaterialProperty', 'UIFont', 'AVCaptureVideoDataOutput', 'NSCachedURLResponse', 'ALAssetsLibrary', 'NSInvocation', 'UILongPressGestureRecognizer', 'NSTextStorage', 'WKWebViewConfiguration', 'CIFaceFeature', 'MKMapSnapshot', 'GLKEffectPropertyFog', 'AVComposition', 'CKDiscoverAllContactsOperation', 'AVAudioMixInputParameters', 'CAEmitterBehavior', 'PKPassLibrary', 'UIMutableUserNotificationCategory', 'NSLock', 'NEVPNProtocolIPSec', 'ADBannerView', 'UIDocumentPickerExtensionViewController', 'UIActivityIndicatorView', 'AVPlayerMediaSelectionCriteria', 'CALayer', 'UIAccessibilityCustomAction', 'UIBarButtonItem', 'AVAudioSessionRouteDescription', 'CLBeaconRegion', 'HKBloodTypeObject', 'MTLVertexBufferLayoutDescriptorArray', 'CABasicAnimation', 'AVVideoCompositionInstruction', 'AVMutableTimedMetadataGroup', 'EKRecurrenceEnd', 'NSTextContainer', 'TWTweetComposeViewController', 'PKPaymentAuthorizationViewController', 'UIScrollView', 'WKNavigationAction', 'AVPlayerItemMetadataOutput', 'EKRecurrenceDayOfWeek', 'NSNumberFormatter', 'MTLComputePipelineReflection', 'UIScreen', 'CLRegion', 'NSProcessInfo', 'GLKTextureInfo', 'SCNSkinner', 'AVCaptureMetadataOutput', 'SCNAnimationEvent', 'NSTextTab', 'JSManagedValue', 'NSDate', 'UITextChecker', 'WKBackForwardListItem', 'NSData', 'NSParagraphStyle', 'AVMutableMetadataItem', 'EKCalendar', 'HKWorkoutEvent', 'NSMutableURLRequest', 'UIVideoEditorController', 'HMTimerTrigger', 'AVAudioUnitVarispeed', 'UIDynamicAnimator', 'AVCompositionTrackSegment', 'GCGamepadSnapshot', 'MPMediaEntity', 'GLKSkyboxEffect', 'UISwitch', 'EKStructuredLocation', 'UIGestureRecognizer', 'NSProxy', 'GLKBaseEffect', 'UIPushBehavior', 'GKScoreChallenge', 'NSCoder', 'MPMediaPlaylist', 'NSDateComponents', 'WKUserScript', 'EKEvent', 'NSDateFormatter', 'NSAsynchronousFetchResult', 'AVAssetWriterInputPixelBufferAdaptor', 'UIVisualEffect', 'UICollectionViewCell', 'UITextField', 'CLPlacemark', 'MPPlayableContentManager', 'AVCaptureOutput', 'HMCharacteristicWriteAction', 'CKModifySubscriptionsOperation', 'NSPropertyDescription', 'GCGamepad', 'UIMarkupTextPrintFormatter', 'SCNTube', 'NSPersistentStoreCoordinator', 'AVAudioEnvironmentNode', 'GKMatchmaker', 'CIContext', 'NSThread', 'SLComposeSheetConfigurationItem', 'SKPhysicsJointSliding', 'NSPredicate', 'GKVoiceChat', 'SKCropNode', 'AVCaptureAudioPreviewOutput', 'NSStringDrawingContext', 
'GKGameCenterViewController', 'UIPrintPaper', 'SCNPhysicsBallSocketJoint', 'UICollectionViewLayoutInvalidationContext', 'GLKEffectPropertyTransform', 'AVAudioIONode', 'UIDatePicker', 'MKDirections', 'ALAssetsGroup', 'CKRecordZoneNotification', 'SCNScene', 'MPMovieAccessLogEvent', 'CKFetchSubscriptionsOperation', 'CAEmitterCell', 'AVAudioUnitTimeEffect', 'HMCharacteristicMetadata', 'MKPinAnnotationView', 'UIPickerView', 'UIImageView', 'UIUserNotificationCategory', 'SCNPhysicsVehicleWheel', 'HKCategoryType', 'MPMediaQuerySection', 'GKFriendRequestComposeViewController', 'NSError', 'MTLRenderPipelineColorAttachmentDescriptor', 'SCNPhysicsShape', 'UISearchController', 'SCNPhysicsBody', 'CTSubscriberInfo', 'AVPlayerItemAccessLog', 'MPMediaPropertyPredicate', 'CMLogItem', 'NSAutoreleasePool', 'NSSocketPort', 'AVAssetReaderTrackOutput', 'SKNode', 'UIMutableUserNotificationAction', 'SCNProgram', 'AVSpeechSynthesisVoice', 'CMAltimeter', 'AVCaptureAudioChannel', 'GKTurnBasedExchangeReply', 'AVVideoCompositionLayerInstruction', 'AVSpeechSynthesizer', 'GKChallengeEventHandler', 'AVCaptureFileOutput', 'UIControl', 'SCNPhysicsField', 'CKReference', 'LAContext', 'CKRecordID', 'ADInterstitialAd', 'AVAudioSessionDataSourceDescription', 'AVAudioBuffer', 'CIColorKernel', 'GCControllerDirectionPad', 'NSFileManager', 'AVMutableAudioMixInputParameters', 'UIScreenEdgePanGestureRecognizer', 'CAKeyframeAnimation', 'CKQueryNotification', 'PHAdjustmentData', 'EASession', 'AVAssetResourceRenewalRequest', 'UIInputView', 'NSFileWrapper', 'UIResponder', 'NSPointerFunctions', 'UIKeyCommand', 'NSHTTPCookieStorage', 'AVMediaSelectionOption', 'NSRunLoop', 'NSFileAccessIntent', 'CAAnimationGroup', 'MKCircle', 'UIAlertController', 'NSMigrationManager', 'NSDateIntervalFormatter', 'UICollectionViewUpdateItem', 'CKDatabaseOperation', 'PHImageRequestOptions', 'SKReachConstraints', 'CKRecord', 'CAInterAppAudioSwitcherView', 'WKWindowFeatures', 'GKInvite', 'NSMutableData', 'PHAssetCollectionChangeRequest', 'NSMutableParagraphStyle', 'UIDynamicBehavior', 'GLKEffectProperty', 'CKFetchRecordChangesOperation', 'SKShapeNode', 'MPMovieErrorLogEvent', 'MKPolygonView', 'MPContentItem', 'HMAction', 'NSScanner', 'GKAchievementChallenge', 'AVAudioPlayer', 'CKContainer', 'AVVideoComposition', 'NKLibrary', 'NSPersistentStore', 'AVCaptureMovieFileOutput', 'HMRoom', 'GKChallenge', 'UITextRange', 'NSURLProtectionSpace', 'ACAccountStore', 'MPSkipIntervalCommand', 'NSComparisonPredicate', 'HMHome', 'PHVideoRequestOptions', 'NSOutputStream', 'MPSkipIntervalCommandEvent', 'PKAddPassesViewController', 'UITextSelectionRect', 'CTTelephonyNetworkInfo', 'AVTextStyleRule', 'NSFetchedPropertyDescription', 'UIPageViewController', 'CATransformLayer', 'UICollectionViewController', 'AVAudioNode', 'MCNearbyServiceAdvertiser', 'NSObject', 'PHAsset', 'GKLeaderboardViewController', 'CKQueryCursor', 'MPMusicPlayerController', 'MKOverlayPathRenderer', 'CMPedometerData', 'HMService', 'SKFieldNode', 'GKAchievement', 'WKUserContentController', 'AVAssetTrack', 'TWRequest', 'SKLabelNode', 'AVCaptureBracketedStillImageSettings', 'MIDINetworkHost', 'MPMediaPredicate', 'AVFrameRateRange', 'MTLTextureDescriptor', 'MTLVertexBufferLayoutDescriptor', 'MPFeedbackCommandEvent', 'UIUserNotificationAction', 'HKStatisticsQuery', 'SCNParticleSystem', 'NSIndexPath', 'AVVideoCompositionRenderContext', 'CADisplayLink', 'HKObserverQuery', 'UIPopoverPresentationController', 'CKQueryOperation', 'CAEAGLLayer', 'NSMutableString', 'NSMessagePort', 'NSURLQueryItem', 'MTLStructMember', 
'AVAudioSessionChannelDescription', 'GLKView', 'UIActivityViewController', 'GKAchievementViewController', 'GKTurnBasedParticipant', 'NSURLProtocol', 'NSUserDefaults', 'NSCalendar', 'SKKeyframeSequence', 'AVMetadataItemFilter', 'CKModifyRecordZonesOperation', 'WKPreferences', 'NSMethodSignature', 'NSRegularExpression', 'EAGLSharegroup', 'AVPlayerItemVideoOutput', 'PHContentEditingInputRequestOptions', 'GKMatch', 'CIColor', 'UIDictationPhrase'])
COCOA_PROTOCOLS = set(['SKStoreProductViewControllerDelegate', 'AVVideoCompositionInstruction', 'AVAudioSessionDelegate', 'GKMatchDelegate', 'NSFileManagerDelegate', 'UILayoutSupport', 'NSCopying', 'UIPrintInteractionControllerDelegate', 'QLPreviewControllerDataSource', 'SKProductsRequestDelegate', 'NSTextStorageDelegate', 'MCBrowserViewControllerDelegate', 'MTLComputeCommandEncoder', 'SCNSceneExportDelegate', 'UISearchResultsUpdating', 'MFMailComposeViewControllerDelegate', 'MTLBlitCommandEncoder', 'NSDecimalNumberBehaviors', 'PHContentEditingController', 'NSMutableCopying', 'UIActionSheetDelegate', 'UIViewControllerTransitioningDelegate', 'UIAlertViewDelegate', 'AVAudioPlayerDelegate', 'MKReverseGeocoderDelegate', 'NSCoding', 'UITextInputTokenizer', 'GKFriendRequestComposeViewControllerDelegate', 'UIActivityItemSource', 'NSCacheDelegate', 'UIAdaptivePresentationControllerDelegate', 'GKAchievementViewControllerDelegate', 'UIViewControllerTransitionCoordinator', 'EKEventEditViewDelegate', 'NSURLConnectionDelegate', 'UITableViewDelegate', 'GKPeerPickerControllerDelegate', 'UIGuidedAccessRestrictionDelegate', 'AVSpeechSynthesizerDelegate', 'AVAudio3DMixing', 'AVPlayerItemLegibleOutputPushDelegate', 'ADInterstitialAdDelegate', 'HMAccessoryBrowserDelegate', 'AVAssetResourceLoaderDelegate', 'UITabBarControllerDelegate', 'CKRecordValue', 'SKPaymentTransactionObserver', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'UIInputViewAudioFeedback', 'GKChallengeListener', 'SKSceneDelegate', 'UIPickerViewDelegate', 'UIWebViewDelegate', 'UIApplicationDelegate', 'GKInviteEventListener', 'MPMediaPlayback', 'MyClassJavaScriptMethods', 'AVAsynchronousKeyValueLoading', 'QLPreviewItem', 'SCNBoundingVolume', 'NSPortDelegate', 'UIContentContainer', 'SCNNodeRendererDelegate', 'SKRequestDelegate', 'SKPhysicsContactDelegate', 'HMAccessoryDelegate', 'UIPageViewControllerDataSource', 'SCNSceneRendererDelegate', 'SCNPhysicsContactDelegate', 'MKMapViewDelegate', 'AVPlayerItemOutputPushDelegate', 'UICollectionViewDelegate', 'UIImagePickerControllerDelegate', 'MTLRenderCommandEncoder', 'PKPaymentAuthorizationViewControllerDelegate', 'UIToolbarDelegate', 'WKUIDelegate', 'SCNActionable', 'NSURLConnectionDataDelegate', 'MKOverlay', 'CBCentralManagerDelegate', 'JSExport', 'NSTextLayoutOrientationProvider', 'UIPickerViewDataSource', 'PKPushRegistryDelegate', 'UIViewControllerTransitionCoordinatorContext', 'NSLayoutManagerDelegate', 'MTLLibrary', 'NSFetchedResultsControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 'MTLResource', 'NSDiscardableContent', 'UITextFieldDelegate', 'MTLBuffer', 'MTLSamplerState', 'GKGameCenterControllerDelegate', 'MPMediaPickerControllerDelegate', 'UISplitViewControllerDelegate', 'UIAppearance', 'UIPickerViewAccessibilityDelegate', 'UITraitEnvironment', 'UIScrollViewAccessibilityDelegate', 'ADBannerViewDelegate', 'MPPlayableContentDataSource', 'MTLComputePipelineState', 'NSURLSessionDelegate', 'MTLCommandBuffer', 'NSXMLParserDelegate', 'UIViewControllerRestoration', 'UISearchBarDelegate', 'UIBarPositioning', 'CBPeripheralDelegate', 'UISearchDisplayDelegate', 'CAAction', 'PKAddPassesViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 'MTLDepthStencilState', 'GKTurnBasedMatchmakerViewControllerDelegate', 'MPPlayableContentDelegate', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'UIAppearanceContainer', 'UIStateRestoring', 'UITextDocumentProxy', 'MTLDrawable', 'NSURLSessionTaskDelegate', 'NSFilePresenter', 'AVAudioStereoMixing', 'UIViewControllerContextTransitioning', 
'UITextInput', 'CBPeripheralManagerDelegate', 'UITextInputDelegate', 'NSFastEnumeration', 'NSURLAuthenticationChallengeSender', 'SCNProgramDelegate', 'AVVideoCompositing', 'SCNAnimatable', 'NSSecureCoding', 'MCAdvertiserAssistantDelegate', 'GKLocalPlayerListener', 'GLKNamedEffect', 'UIPopoverControllerDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'NSExtensionRequestHandling', 'UITextSelecting', 'UIPrinterPickerControllerDelegate', 'NCWidgetProviding', 'MTLCommandEncoder', 'NSURLProtocolClient', 'MFMessageComposeViewControllerDelegate', 'UIVideoEditorControllerDelegate', 'WKNavigationDelegate', 'GKSavedGameListener', 'UITableViewDataSource', 'MTLFunction', 'EKCalendarChooserDelegate', 'NSUserActivityDelegate', 'UICollisionBehaviorDelegate', 'NSStreamDelegate', 'MCNearbyServiceBrowserDelegate', 'HMHomeDelegate', 'UINavigationControllerDelegate', 'MCSessionDelegate', 'UIDocumentPickerDelegate', 'UIViewControllerInteractiveTransitioning', 'GKTurnBasedEventListener', 'SCNSceneRenderer', 'MTLTexture', 'GLKViewDelegate', 'EAAccessoryDelegate', 'WKScriptMessageHandler', 'PHPhotoLibraryChangeObserver', 'NSKeyedUnarchiverDelegate', 'AVPlayerItemMetadataOutputPushDelegate', 'NSMachPortDelegate', 'SCNShadable', 'UIPopoverBackgroundViewMethods', 'UIDocumentMenuDelegate', 'UIBarPositioningDelegate', 'ABPersonViewControllerDelegate', 'NSNetServiceBrowserDelegate', 'EKEventViewDelegate', 'UIScrollViewDelegate', 'NSURLConnectionDownloadDelegate', 'UIGestureRecognizerDelegate', 'UINavigationBarDelegate', 'AVAudioMixing', 'NSFetchedResultsSectionInfo', 'UIDocumentInteractionControllerDelegate', 'MTLParallelRenderCommandEncoder', 'QLPreviewControllerDelegate', 'UIAccessibilityReadingContent', 'ABUnknownPersonViewControllerDelegate', 'GLKViewControllerDelegate', 'UICollectionViewDelegateFlowLayout', 'UIPopoverPresentationControllerDelegate', 'UIDynamicAnimatorDelegate', 'NSTextAttachmentContainer', 'MKAnnotation', 'UIAccessibilityIdentification', 'UICoordinateSpace', 'ABNewPersonViewControllerDelegate', 'MTLDevice', 'CAMediaTiming', 'AVCaptureFileOutputRecordingDelegate', 'HMHomeManagerDelegate', 'UITextViewDelegate', 'UITabBarDelegate', 'GKLeaderboardViewControllerDelegate', 'UISearchControllerDelegate', 'EAWiFiUnconfiguredAccessoryBrowserDelegate', 'UITextInputTraits', 'MTLRenderPipelineState', 'GKVoiceChatClient', 'UIKeyInput', 'UICollectionViewDataSource', 'SCNTechniqueSupport', 'NSLocking', 'AVCaptureFileOutputDelegate', 'GKChallengeEventHandlerDelegate', 'UIObjectRestoration', 'CIFilterConstructor', 'AVPlayerItemOutputPullDelegate', 'EAGLDrawable', 'AVVideoCompositionValidationHandling', 'UIViewControllerAnimatedTransitioning', 'NSURLSessionDownloadDelegate', 'UIAccelerometerDelegate', 'UIPageViewControllerDelegate', 'MTLCommandQueue', 'UIDataSourceModelAssociation', 'AVAudioRecorderDelegate', 'GKSessionDelegate', 'NSKeyedArchiverDelegate', 'CAMetalDrawable', 'UIDynamicItem', 'CLLocationManagerDelegate', 'NSMetadataQueryDelegate', 'NSNetServiceDelegate', 'GKMatchmakerViewControllerDelegate', 'NSURLSessionDataDelegate'])
COCOA_PRIMITIVES = set(['ROTAHeader', '__CFBundle', 'MortSubtable', 'AudioFilePacketTableInfo', 'CGPDFOperatorTable', 'KerxStateEntry', 'ExtendedTempoEvent', 'CTParagraphStyleSetting', 'OpaqueMIDIPort', '_GLKMatrix3', '_GLKMatrix2', '_GLKMatrix4', 'ExtendedControlEvent', 'CAFAudioDescription', 'OpaqueCMBlockBuffer', 'CGTextDrawingMode', 'EKErrorCode', 'gss_buffer_desc_struct', 'AudioUnitParameterInfo', '__SCPreferences', '__CTFrame', '__CTLine', 'AudioFile_SMPTE_Time', 'gss_krb5_lucid_context_v1', 'OpaqueJSValue', 'TrakTableEntry', 'AudioFramePacketTranslation', 'CGImageSource', 'OpaqueJSPropertyNameAccumulator', 'JustPCGlyphRepeatAddAction', '__CFBinaryHeap', 'OpaqueMIDIThruConnection', 'opaqueCMBufferQueue', 'OpaqueMusicSequence', 'MortRearrangementSubtable', 'MixerDistanceParams', 'MorxSubtable', 'MIDIObjectPropertyChangeNotification', 'SFNTLookupSegment', 'CGImageMetadataErrors', 'CGPath', 'OpaqueMIDIEndpoint', 'AudioComponentPlugInInterface', 'gss_ctx_id_t_desc_struct', 'sfntFontFeatureSetting', 'OpaqueJSContextGroup', '__SCNetworkConnection', 'AudioUnitParameterValueTranslation', 'CGImageMetadataType', 'CGPattern', 'AudioFileTypeAndFormatID', 'CGContext', 'AUNodeInteraction', 'SFNTLookupTable', 'JustPCDecompositionAction', 'KerxControlPointHeader', 'AudioStreamPacketDescription', 'KernSubtableHeader', '__SecCertificate', 'AUMIDIOutputCallbackStruct', 'MIDIMetaEvent', 'AudioQueueChannelAssignment', 'AnchorPoint', 'JustTable', '__CFNetService', 'CF_BRIDGED_TYPE', 'gss_krb5_lucid_key', 'CGPDFDictionary', 'KerxSubtableHeader', 'CAF_UUID_ChunkHeader', 'gss_krb5_cfx_keydata', 'OpaqueJSClass', 'CGGradient', 'OpaqueMIDISetup', 'JustPostcompTable', '__CTParagraphStyle', 'AudioUnitParameterHistoryInfo', 'OpaqueJSContext', 'CGShading', 'MIDIThruConnectionParams', 'BslnFormat0Part', 'SFNTLookupSingle', '__CFHost', '__SecRandom', '__CTFontDescriptor', '_NSRange', 'sfntDirectory', 'AudioQueueLevelMeterState', 'CAFPositionPeak', 'PropLookupSegment', '__CVOpenGLESTextureCache', 'sfntInstance', '_GLKQuaternion', 'AnkrTable', '__SCNetworkProtocol', 'CAFFileHeader', 'KerxOrderedListHeader', 'CGBlendMode', 'STXEntryOne', 'CAFRegion', 'SFNTLookupTrimmedArrayHeader', 'SCNMatrix4', 'KerxControlPointEntry', 'OpaqueMusicTrack', '_GLKVector4', 'gss_OID_set_desc_struct', 'OpaqueMusicPlayer', '_CFHTTPAuthentication', 'CGAffineTransform', 'CAFMarkerChunk', 'AUHostIdentifier', 'ROTAGlyphEntry', 'BslnTable', 'gss_krb5_lucid_context_version', '_GLKMatrixStack', 'CGImage', 'KernStateEntry', 'SFNTLookupSingleHeader', 'MortLigatureSubtable', 'CAFUMIDChunk', 'SMPTETime', 'CAFDataChunk', 'CGPDFStream', 'AudioFileRegionList', 'STEntryTwo', 'SFNTLookupBinarySearchHeader', 'OpbdTable', '__CTGlyphInfo', 'BslnFormat2Part', 'KerxIndexArrayHeader', 'TrakTable', 'KerxKerningPair', '__CFBitVector', 'KernVersion0SubtableHeader', 'OpaqueAudioComponentInstance', 'AudioChannelLayout', '__CFUUID', 'MIDISysexSendRequest', '__CFNumberFormatter', 'CGImageSourceStatus', 'AudioFileMarkerList', 'AUSamplerBankPresetData', 'CGDataProvider', 'AudioFormatInfo', '__SecIdentity', 'sfntCMapExtendedSubHeader', 'MIDIChannelMessage', 'KernOffsetTable', 'CGColorSpaceModel', 'MFMailComposeErrorCode', 'CGFunction', '__SecTrust', 'AVAudio3DAngularOrientation', 'CGFontPostScriptFormat', 'KernStateHeader', 'AudioUnitCocoaViewInfo', 'CGDataConsumer', 'OpaqueMIDIDevice', 'KernVersion0Header', 'AnchorPointTable', 'CGImageDestination', 'CAFInstrumentChunk', 'AudioUnitMeterClipping', 'MorxChain', '__CTFontCollection', 'STEntryOne', 'STXEntryTwo', 
'ExtendedNoteOnEvent', 'CGColorRenderingIntent', 'KerxSimpleArrayHeader', 'MorxTable', '_GLKVector3', '_GLKVector2', 'MortTable', 'CGPDFBox', 'AudioUnitParameterValueFromString', '__CFSocket', 'ALCdevice_struct', 'MIDINoteMessage', 'sfntFeatureHeader', 'CGRect', '__SCNetworkInterface', '__CFTree', 'MusicEventUserData', 'TrakTableData', 'GCQuaternion', 'MortContextualSubtable', '__CTRun', 'AudioUnitFrequencyResponseBin', 'MortChain', 'MorxInsertionSubtable', 'CGImageMetadata', 'gss_auth_identity', 'AudioUnitMIDIControlMapping', 'CAFChunkHeader', 'CGImagePropertyOrientation', 'CGPDFScanner', 'OpaqueMusicEventIterator', 'sfntDescriptorHeader', 'AudioUnitNodeConnection', 'OpaqueMIDIDeviceList', 'ExtendedAudioFormatInfo', 'BslnFormat1Part', 'sfntFontDescriptor', 'KernSimpleArrayHeader', '__CFRunLoopObserver', 'CGPatternTiling', 'MIDINotification', 'MorxLigatureSubtable', 'MessageComposeResult', 'MIDIThruConnectionEndpoint', 'MusicDeviceStdNoteParams', 'opaqueCMSimpleQueue', 'ALCcontext_struct', 'OpaqueAudioQueue', 'PropLookupSingle', 'CGInterpolationQuality', 'CGColor', 'AudioOutputUnitStartAtTimeParams', 'gss_name_t_desc_struct', 'CGFunctionCallbacks', 'CAFPacketTableHeader', 'AudioChannelDescription', 'sfntFeatureName', 'MorxContextualSubtable', 'CVSMPTETime', 'AudioValueRange', 'CGTextEncoding', 'AudioStreamBasicDescription', 'AUNodeRenderCallback', 'AudioPanningInfo', 'KerxOrderedListEntry', '__CFAllocator', 'OpaqueJSPropertyNameArray', '__SCDynamicStore', 'OpaqueMIDIEntity', '__CTRubyAnnotation', 'SCNVector4', 'CFHostClientContext', 'CFNetServiceClientContext', 'AudioUnitPresetMAS_SettingData', 'opaqueCMBufferQueueTriggerToken', 'AudioUnitProperty', 'CAFRegionChunk', 'CGPDFString', '__GLsync', '__CFStringTokenizer', 'JustWidthDeltaEntry', 'sfntVariationAxis', '__CFNetDiagnostic', 'CAFOverviewSample', 'sfntCMapEncoding', 'CGVector', '__SCNetworkService', 'opaqueCMSampleBuffer', 'AUHostVersionIdentifier', 'AudioBalanceFade', 'sfntFontRunFeature', 'KerxCoordinateAction', 'sfntCMapSubHeader', 'CVPlanarPixelBufferInfo', 'AUNumVersion', 'AUSamplerInstrumentData', 'AUPreset', '__CTRunDelegate', 'OpaqueAudioQueueProcessingTap', 'KerxTableHeader', '_NSZone', 'OpaqueExtAudioFile', '__CFRunLoopSource', '__CVMetalTextureCache', 'KerxAnchorPointAction', 'OpaqueJSString', 'AudioQueueParameterEvent', '__CFHTTPMessage', 'OpaqueCMClock', 'ScheduledAudioFileRegion', 'STEntryZero', 'AVAudio3DPoint', 'gss_channel_bindings_struct', 'sfntVariationHeader', 'AUChannelInfo', 'UIOffset', 'GLKEffectPropertyPrv', 'KerxStateHeader', 'CGLineJoin', 'CGPDFDocument', '__CFBag', 'KernOrderedListHeader', '__SCNetworkSet', '__SecKey', 'MIDIObjectAddRemoveNotification', 'AudioUnitParameter', 'JustPCActionSubrecord', 'AudioComponentDescription', 'AudioUnitParameterValueName', 'AudioUnitParameterEvent', 'KerxControlPointAction', 'AudioTimeStamp', 'KernKerningPair', 'gss_buffer_set_desc_struct', 'MortFeatureEntry', 'FontVariation', 'CAFStringID', 'LcarCaretClassEntry', 'AudioUnitParameterStringFromValue', 'ACErrorCode', 'ALMXGlyphEntry', 'LtagTable', '__CTTypesetter', 'AuthorizationOpaqueRef', 'UIEdgeInsets', 'CGPathElement', 'CAFMarker', 'KernTableHeader', 'NoteParamsControlValue', 'SSLContext', 'gss_cred_id_t_desc_struct', 'AudioUnitParameterNameInfo', 'CGDataConsumerCallbacks', 'ALMXHeader', 'CGLineCap', 'MIDIControlTransform', 'CGPDFArray', '__SecPolicy', 'AudioConverterPrimeInfo', '__CTTextTab', '__CFNetServiceMonitor', 'AUInputSamplesInOutputCallbackStruct', '__CTFramesetter', 'CGPDFDataFormat', 'STHeader', 
'CVPlanarPixelBufferInfo_YCbCrPlanar', 'MIDIValueMap', 'JustDirectionTable', '__SCBondStatus', 'SFNTLookupSegmentHeader', 'OpaqueCMMemoryPool', 'CGPathDrawingMode', 'CGFont', '__SCNetworkReachability', 'AudioClassDescription', 'CGPoint', 'AVAudio3DVectorOrientation', 'CAFStrings', '__CFNetServiceBrowser', 'opaqueMTAudioProcessingTap', 'sfntNameRecord', 'CGPDFPage', 'CGLayer', 'ComponentInstanceRecord', 'CAFInfoStrings', 'HostCallbackInfo', 'MusicDeviceNoteParams', 'OpaqueVTCompressionSession', 'KernIndexArrayHeader', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'MusicTrackLoopInfo', 'opaqueCMFormatDescription', 'STClassTable', 'sfntDirectoryEntry', 'OpaqueCMTimebase', 'CGDataProviderDirectCallbacks', 'MIDIPacketList', 'CAFOverviewChunk', 'MIDIPacket', 'ScheduledAudioSlice', 'CGDataProviderSequentialCallbacks', 'AudioBuffer', 'MorxRearrangementSubtable', 'CGPatternCallbacks', 'AUDistanceAttenuationData', 'MIDIIOErrorNotification', 'CGPDFContentStream', 'IUnknownVTbl', 'MIDITransform', 'MortInsertionSubtable', 'CABarBeatTime', 'AudioBufferList', '__CVBuffer', 'AURenderCallbackStruct', 'STXEntryZero', 'JustPCDuctilityAction', 'OpaqueAudioQueueTimeline', 'VTDecompressionOutputCallbackRecord', 'OpaqueMIDIClient', '__CFPlugInInstance', 'AudioQueueBuffer', '__CFFileDescriptor', 'AudioUnitConnection', '_GKTurnBasedExchangeStatus', 'LcarCaretTable', 'CVPlanarComponentInfo', 'JustWidthDeltaGroup', 'OpaqueAudioComponent', 'ParameterEvent', '__CVPixelBufferPool', '__CTFont', 'CGColorSpace', 'CGSize', 'AUDependentParameter', 'MIDIDriverInterface', 'gss_krb5_rfc1964_keydata', '__CFDateFormatter', 'LtagStringRange', 'OpaqueVTDecompressionSession', 'gss_iov_buffer_desc_struct', 'AUPresetEvent', 'PropTable', 'KernOrderedListEntry', 'CF_BRIDGED_MUTABLE_TYPE', 'gss_OID_desc_struct', 'AudioUnitPresetMAS_Settings', 'AudioFileMarker', 'JustPCConditionalAddAction', 'BslnFormat3Part', '__CFNotificationCenter', 'MortSwashSubtable', 'AUParameterMIDIMapping', 'SCNVector3', 'OpaqueAudioConverter', 'MIDIRawData', 'sfntNameHeader', '__CFRunLoop', 'MFMailComposeResult', 'CATransform3D', 'OpbdSideValues', 'CAF_SMPTE_Time', '__SecAccessControl', 'JustPCAction', 'OpaqueVTFrameSilo', 'OpaqueVTMultiPassStorage', 'CGPathElementType', 'AudioFormatListItem', 'AudioUnitExternalBuffer', 'AudioFileRegion', 'AudioValueTranslation', 'CGImageMetadataTag', 'CAFPeakChunk', 'AudioBytePacketTranslation', 'sfntCMapHeader', '__CFURLEnumerator', 'STXHeader', 'CGPDFObjectType', 'SFNTLookupArrayHeader'])
if __name__ == '__main__': # pragma: no cover
import os
import re
FRAMEWORKS_PATH = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.1.sdk/System/Library/Frameworks/'
frameworks = os.listdir(FRAMEWORKS_PATH)
all_interfaces = set()
all_protocols = set()
all_primitives = set()
for framework in frameworks:
frameworkHeadersDir = FRAMEWORKS_PATH + framework + '/Headers/'
if not os.path.exists(frameworkHeadersDir):
continue
headerFilenames = os.listdir(frameworkHeadersDir)
for f in headerFilenames:
if not f.endswith('.h'):
continue
headerFilePath = frameworkHeadersDir + f
content = open(headerFilePath).read()
            res = re.findall(r'(?<=@interface )\w+', content)
            for r in res:
                all_interfaces.add(r)
            res = re.findall(r'(?<=@protocol )\w+', content)
            for r in res:
                all_protocols.add(r)
            res = re.findall(r'(?<=typedef enum )\w+', content)
            for r in res:
                all_primitives.add(r)
            res = re.findall(r'(?<=typedef struct )\w+', content)
            for r in res:
                all_primitives.add(r)
            res = re.findall(r'(?<=typedef const struct )\w+', content)
            for r in res:
                all_primitives.add(r)
print("ALL interfaces: \n")
print(all_interfaces)
print("\nALL protocols: \n")
print(all_protocols)
print("\nALL primitives: \n")
print(all_primitives)
|
austinhartzheim/gravel
|
refs/heads/master
|
account/migrations/__init__.py
|
12133432
| |
RuralIndia/pari
|
refs/heads/master
|
pari/album/__init__.py
|
12133432
| |
marchon/poker
|
refs/heads/master
|
tests/handhistory/conftest.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import pytest
from poker.room.pokerstars import PokerStarsHandHistory
from . import stars_hands
# get every variable starting with 'HAND' from the stars_hands module
all_test_hands = [getattr(stars_hands, hand_text)
for hand_text in dir(stars_hands)
if hand_text.startswith('HAND')]
@pytest.fixture(params=all_test_hands)
def all_stars_hands(request):
"""Parse all hands from test_data and returns a PokerStarsHandHistory instance."""
hh = PokerStarsHandHistory(request.param)
hh.parse()
return hh
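# Illustrative usage (assumed test module, not part of this fixture file): any
# test that accepts the fixture runs once per HAND* constant, e.g.
#
#   def test_hand_has_players(all_stars_hands):
#       assert all_stars_hands.players
#
# where `players` is a hypothetical attribute of PokerStarsHandHistory used
# only for illustration.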
|
amadeusproject/amadeuslms
|
refs/heads/master
|
chat/serializers.py
|
1
|
"""
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Sistema de Gestão de Aprendizagem program, or simply Amadeus LMS.
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF), version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of FITNESS FOR ANY MARKET or PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from rest_framework import serializers
from django.db.models import Q
from .models import TalkMessages, ChatFavorites
from subjects.serializers import SubjectSerializer
from users.serializers import UserSerializer
class ChatSerializer(serializers.ModelSerializer):
user = UserSerializer()
subject = SubjectSerializer()
favorite = serializers.SerializerMethodField()
def get_favorite(self, message):
user = self.context.get("request_user", None)
        if user is not None:
return ChatFavorites.objects.filter(Q(user = user) & Q(message = message)).exists()
return False
class Meta:
model = TalkMessages
fields = ('text', 'user', 'subject', 'image_url', 'create_date', 'favorite', 'id')
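# Illustrative sketch (variable names are assumptions, not part of this module):
# pass the requesting user through the serializer context so that get_favorite()
# can look up ChatFavorites for that user:
#
#   serializer = ChatSerializer(messages, many=True,
#                               context={"request_user": request.user})
#   data = serializer.data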
|
LK/advent-of-code
|
refs/heads/master
|
2016/10-2.py
|
1
|
import re
class Bot(object):
def __init__(self, id, start, low, high):
self.id = id
self.chips = start
self.low = low
self.high = high
def give_chip(self, chip, bots):
self.chips.append(chip)
if len(self.chips) == 2:
min_chip = min(self.chips)
max_chip = max(self.chips)
self.chips = []
bots[self.low].give_chip(min_chip, bots)
bots[self.high].give_chip(max_chip, bots)
class OutputBot(Bot):
def give_chip(self, chip, bots):
self.chips.append(chip)
def get_id(out, id):
if out == 'bot':
return id
else:
return -(id+1)
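# Worked example of the id mapping above: get_id('bot', 3) returns 3, while
# outputs are folded into negative ids, e.g. get_id('output', 0) -> -1 and
# get_id('output', 2) -> -3, so bots and outputs can share one dictionary.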
def create_bot(out, id):
if out == 'bot':
return Bot(get_id(out, id), [], 0, 0)
else:
return OutputBot(get_id(out, id), [], 0, 0)
line = raw_input()
starting = {}
bots = {}
while line != '':
if line[:3] == 'bot':
m = re.search(r'bot (\d*?) gives low to (bot|output) (\d*?) and high to (bot|output) (\d*)', line)
origin_id = int(m.group(1))
low_out = m.group(2)
low_id = int(m.group(3))
high_out = m.group(4)
high_id = int(m.group(5))
if origin_id not in bots:
bots[origin_id] = Bot(origin_id, [], 0, 0)
if get_id(low_out, low_id) not in bots:
bots[get_id(low_out, low_id)] = create_bot(low_out, low_id)
if get_id(high_out, high_id) not in bots:
bots[get_id(high_out, high_id)] = create_bot(high_out, high_id)
bots[origin_id].low = get_id(low_out, low_id)
bots[origin_id].high = get_id(high_out, high_id)
else:
m = re.search(r'value (\d*?) goes to bot (\d*)', line)
if int(m.group(2)) not in starting:
starting[int(m.group(2))] = [int(m.group(1))]
else:
starting[int(m.group(2))].append(int(m.group(1)))
line = raw_input()
for id, vals in starting.iteritems():
for val in vals:
bots[id].give_chip(val, bots)
print bots[-1].chips[0] * bots[-2].chips[0] * bots[-3].chips[0]
|
gorhill/uMatrix
|
refs/heads/master
|
dist/chromium/publish-beta.py
|
1
|
#!/usr/bin/env python3
import datetime
import json
import jwt
import os
import re
import requests
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
from distutils.version import StrictVersion
from string import Template
# - Download target (raw) uMatrix.chromium.zip from GitHub
# - This is referred to as "raw" package
# - This will fail if not a dev build
# - Upload uMatrix.chromium.zip to Chrome store
# - Publish uMatrix.chromium.zip to Chrome store
# Find path to project root
projdir = os.path.split(os.path.abspath(__file__))[0]
while not os.path.isdir(os.path.join(projdir, '.git')):
projdir = os.path.normpath(os.path.join(projdir, '..'))
# We need a version string to work with
if len(sys.argv) >= 2 and sys.argv[1]:
version = sys.argv[1]
else:
version = input('Github release version: ')
version = version.strip()
if not re.search(r'^\d+\.\d+\.\d+(b|rc)\d+$', version):
print('Error: Invalid version string.')
exit(1)
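# For reference (illustrative values): '1.0.0b3' and '1.0.0rc1' match the
# pattern above, while a stable version string such as '1.0.0' is rejected,
# since this script is only meant for dev builds (see the notes at the top).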
cs_extension_id = 'eckgcipdkhcfghnmincccnhpdmnbefki'
tmpdir = tempfile.TemporaryDirectory()
raw_zip_filename = 'uMatrix_'+ version + '.chromium.zip'
raw_zip_filepath = os.path.join(tmpdir.name, raw_zip_filename)
github_owner = 'gorhill'
github_repo = 'uMatrix'
# Load/save auth secrets
# The build directory is excluded from git
ubo_secrets = dict()
ubo_secrets_filename = os.path.join(projdir, 'dist', 'build', 'ubo_secrets')
if os.path.isfile(ubo_secrets_filename):
with open(ubo_secrets_filename) as f:
ubo_secrets = json.load(f)
def input_secret(prompt, token):
if token in ubo_secrets:
prompt += ' ✔'
prompt += ': '
value = input(prompt).strip()
if len(value) == 0:
if token not in ubo_secrets:
print('Token error:', token)
exit(1)
value = ubo_secrets[token]
elif token not in ubo_secrets or value != ubo_secrets[token]:
ubo_secrets[token] = value
exists = os.path.isfile(ubo_secrets_filename)
with open(ubo_secrets_filename, 'w') as f:
json.dump(ubo_secrets, f, indent=2)
if not exists:
os.chmod(ubo_secrets_filename, 0o600)
return value
# GitHub API token
github_token = input_secret('Github token', 'github_token')
github_auth = 'token ' + github_token
#
# Get metadata from GitHub about the release
#
# https://developer.github.com/v3/repos/releases/#get-a-single-release
print('Downloading release info from GitHub...')
release_info_url = 'https://api.github.com/repos/{0}/{1}/releases/tags/{2}'.format(github_owner, github_repo, version)
headers = { 'Authorization': github_auth, }
response = requests.get(release_info_url, headers=headers)
if response.status_code != 200:
print('Error: Release not found: {0}'.format(response.status_code))
exit(1)
release_info = response.json()
#
# Extract URL to raw package from metadata
#
# Find url for uMatrix.chromium.zip
raw_zip_url = ''
for asset in release_info['assets']:
if asset['name'] == raw_zip_filename:
raw_zip_url = asset['url']
if len(raw_zip_url) == 0:
print('Error: Release asset URL not found')
exit(1)
#
# Download raw package from GitHub
#
# https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
print('Downloading raw zip package from GitHub...')
headers = {
'Authorization': github_auth,
'Accept': 'application/octet-stream',
}
response = requests.get(raw_zip_url, headers=headers)
# Redirections are transparently handled:
# http://docs.python-requests.org/en/master/user/quickstart/#redirection-and-history
if response.status_code != 200:
print('Error: Downloading raw package failed -- server error {0}'.format(response.status_code))
exit(1)
with open(raw_zip_filepath, 'wb') as f:
f.write(response.content)
print('Downloaded raw package saved as {0}'.format(raw_zip_filepath))
#
# Upload to Chrome store
#
# Auth tokens
cs_id = input_secret('Chrome store id', 'cs_id')
cs_secret = input_secret('Chrome store secret', 'cs_secret')
cs_refresh = input_secret('Chrome store refresh token', 'cs_refresh')
print('Uploading to Chrome store...')
with open(raw_zip_filepath, 'rb') as f:
print('Generating access token...')
auth_url = 'https://accounts.google.com/o/oauth2/token'
auth_payload = {
'client_id': cs_id,
'client_secret': cs_secret,
'grant_type': 'refresh_token',
'refresh_token': cs_refresh,
}
auth_response = requests.post(auth_url, data=auth_payload)
if auth_response.status_code != 200:
print('Error: Auth failed -- server error {0}'.format(auth_response.status_code))
print(auth_response.text)
exit(1)
response_dict = auth_response.json()
if 'access_token' not in response_dict:
print('Error: Auth failed -- no access token')
exit(1)
# Prepare access token
cs_auth = 'Bearer ' + response_dict['access_token']
headers = {
'Authorization': cs_auth,
'x-goog-api-version': '2',
}
# Upload
print('Uploading package...')
upload_url = 'https://www.googleapis.com/upload/chromewebstore/v1.1/items/{0}'.format(cs_extension_id)
upload_response = requests.put(upload_url, headers=headers, data=f)
f.close()
if upload_response.status_code != 200:
print('Upload failed -- server error {0}'.format(upload_response.status_code))
print(upload_response.text)
exit(1)
response_dict = upload_response.json();
if 'uploadState' not in response_dict or response_dict['uploadState'] != 'SUCCESS':
print('Upload failed -- server error {0}'.format(response_dict['uploadState']))
exit(1)
print('Upload succeeded.')
# Publish
print('Publishing package...')
publish_url = 'https://www.googleapis.com/chromewebstore/v1.1/items/{0}/publish'.format(cs_extension_id)
headers = {
'Authorization': cs_auth,
'x-goog-api-version': '2',
'Content-Length': '0',
}
publish_response = requests.post(publish_url, headers=headers)
if publish_response.status_code != 200:
print('Error: Chrome store publishing failed -- server error {0}'.format(publish_response.status_code))
exit(1)
response_dict = publish_response.json();
if 'status' not in response_dict or response_dict['status'][0] != 'OK':
print('Publishing failed -- server error {0}'.format(response_dict['status']))
exit(1)
print('Publishing succeeded.')
print('All done.')
|
netcon-source/OpenClimateGIS
|
refs/heads/master
|
src/openclimategis/util/ncconv/experimental/ocg_dataset/sub.py
|
7
|
from util.ncconv.experimental.helpers import *
from shapely import prepared
from shapely.ops import cascaded_union
from util.ncconv.experimental.ocg_dataset.todb import sub_to_db
from util.ncconv.experimental.ordered_dict import OrderedDict
from warnings import warn
class SubOcgDataset(object):
__attrs__ = ['geometry','value','gid','weight','timevec','levelvec','tid']
def __init__(self,geometry,value,timevec,gid=None,levelvec=None,mask=None,id=None,tid=None):
"""
geometry -- numpy array with dimension (n) of shapely Polygon
objects
value -- numpy array with dimension (time,level,n)
gid -- numpy array containing integer unique ids for the grid cells.
has dimension (n)
timevec -- numpy array with indices corresponding to time dimension of
value
mask -- boolean mask array with same dimension as value. will subset other
inputs if passed. a value to be masked is indicated by True.
        tid -- numpy array containing integer unique ids for the time steps.
            has the same length as timevec
"""
self.id = id
self.geometry = np.array(geometry)
self.value = np.array(value)
self.timevec = np.array(timevec)
if gid is not None:
self.gid = np.array(gid)
else:
self.gid = np.arange(1,len(self.geometry)+1)
if tid is not None:
self.tid = np.array(tid)
else:
self.tid = np.arange(1,len(self.timevec)+1)
if levelvec is not None:
self.levelvec = np.array(levelvec)
else:
if len(self.value) == 0:
self.levelvec = np.array([])
else:
self.levelvec = np.arange(1,self.value.shape[1]+1)
## if the mask is passed, subset the data
if mask is not None:
mask = np.invert(mask)[0,0,:]
self.geometry = self.geometry[mask]
self.gid = self.gid[mask]
self.value = self.value[:,:,mask]
## calculate nominal weights
self.weight = np.ones(self.geometry.shape,dtype=float)
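        # Illustrative shapes (assumed example data): with 3 time steps,
        # 1 level and 4 grid cells, value has shape (3, 1, 4), while
        # geometry, gid and weight have shape (4,) and timevec/tid have
        # shape (3,).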
@property
def has_value_set(self):
if hasattr(self,'value_set'):
return(True)
else:
return(False)
def to_db(self,**kwds):
"""
Convert the object to a database. See documentation for |sub_to_db| for
guidance on |kwds|.
"""
return(sub_to_db(self,**kwds))
@timing
def to_grid_dict(self,ocg_dataset):
"""
Generate spatial grid information for NetCDF output. Assumes an
intersects-like operation with no union
ocg_dataset -- OcgDataset object. This is needed to establish the
reference grid.
"""
## make the bounding polygon
envelope = MultiPolygon(self.geometry.tolist()).envelope
## get the x,y vectors
print('subsetting centroids...')
x,y = ocg_dataset.spatial.subset_centroids(envelope)
## make the grids
gx,gy = np.meshgrid(x,y)
## make the empty boolean array
mask = np.empty((self.value.shape[0],
self.value.shape[1],
gx.shape[0],
gx.shape[1]),dtype=bool)
mask[:,:,:,:] = True
## make the empty geometry id
gidx = np.empty(gx.shape,dtype=int)
gidx = np.ma.array(gidx,mask=mask[0,0,:,:])
## make the empty value array
# value = np.empty(mask.shape,dtype=float)
## loop for each centroid
print('finding centroid location in array...')
for ii,geom in enumerate(self.geometry):
diffx = abs(gx - geom.centroid.x)
diffy = abs(gy - geom.centroid.y)
diff = diffx + diffy
idx = diff == diff.min()
mask[:,:,idx] = False
gidx[idx] = ii
# for dt in self.dim_time:
# for dl in self.dim_level:
# value[dt,dl,idx] = self.value[dt,dl,ii]
# construct the masked array
# value = np.ma.array(value,mask=mask,fill_value=fill_value)
## if level is not included, squeeze out the dimension
# if not include_level:
# value = value.squeeze()
# ## construct row and column bounds
# xbnds = np.empty((len(self.geometry),2),dtype=float)
# ybnds = xbnds.copy()
## subset the bounds
xbnds,ybnds = ocg_dataset.spatial.subset_bounds(envelope)
## put the data together
ret = dict(xbnds=xbnds,ybnds=ybnds,x=x,y=y,mask=mask,gidx=gidx)
return(ret)
def copy(self,**kwds):
"""
Copy a SubOcgDataset object. The optional |kwds| may be new data values
to overwrite the original values. Useful when wanting to only
replace a subset of attributes while creating copies of the object.
"""
new_ds = copy.copy(self)
def _find_set(kwd):
val = kwds.get(kwd)
if val is not None:
setattr(new_ds,kwd,val)
for attr in self.__attrs__: _find_set(attr)
return(new_ds)
def merge(self,sub,id=None,union=False):
"""
Merges another SubOcgDataset object with this instance. Assumes same
time and level vectors.
id=None (int) -- Unique identifier for the newly merged dataset. If the
default is used, the new dataset has no ID.
"""
geometry = np.hstack((self.geometry,sub.geometry))
value = np.dstack((self.value,sub.value))
gid = np.hstack((self.gid,sub.gid))
weight = np.hstack((self.weight,sub.weight))
if self.has_value_set:
self.value_set = np.ma.dstack((self.value_set,sub.value_set))
## if there are non-unique cell ids (which may happen with union
## operations, regenerate the unique values.
if union:
if len(gid) > len(np.unique(gid)):
gid = np.arange(1,len(gid)+1)
return(self.copy(geometry=geometry,
value=value,
gid=gid,
id=id,
weight=weight))
@timing
def purge(self):
"""
Removes duplicate geometries from this object instance.
"""
unique,uidx = np.unique([geom.wkb for geom in self.geometry],return_index=True)
self.geometry = self.geometry[uidx]
self.gid = self.gid[uidx]
self.value = self.value[:,:,uidx]
#
# if self.has_value_set:
# self.value_set = self.value_set[:,:,uidx]
def __iter__(self):
"""
Default object iterator returning a dictionary representation of each
object "record".
"""
ocgid = 0
for dt in self.dim_time:
tid = int(self.tid[dt])
for dl in self.dim_level:
for dd in self.dim_data:
ocgid += 1
keys = ['OCGID','GID','TID','LEVEL','TIME','VALUE','geometry']
values = [ocgid,
int(self.gid[dd]),
tid,
int(self.levelvec[dl]),
self.timevec[dt],
float(self.value[dt,dl,dd]),
self.geometry[dd]]
d = OrderedDict(zip(keys,values))
yield(d)
def iter_value_keyed(self):
pops = ['geometry','TIME']
for row in self:
for pop in pops: row.pop(pop)
yield(row)
def iter_time(self,expand=True):
for dt in self.dim_time:
d = OrderedDict(zip(['TID','TIME'],[int(self.tid[dt]),self.timevec[dt]]))
if expand:
attrs = ['YEAR','MONTH','DAY']
for attr in attrs:
d.update({attr:getattr(d['TIME'],attr.lower())})
yield(d)
def iter_with_area(self,area_srid=3005,wkt=False,wkb=False,keep_geom=True):
"""
Wraps the default object iterator appending the geometric area of a
geometry.
area_srid=3005 (int) -- The SRID to use for the area transform.
"""
sr_orig = get_sr(4326)
sr_dest = get_sr(area_srid)
for attrs in self:
if not keep_geom:
geom = attrs.pop('geometry')
else:
geom = attrs['geometry']
attrs.update({'AREA_M2':get_area(geom,sr_orig,sr_dest)})
if wkt:
attrs.update(dict(WKT=geom.wkt))
if wkb:
attrs.update(dict(WKB=geom.wkb))
yield(attrs)
def iter_geom_with_area(self,area_srid=3005,keep_geom=True,wkt=False,wkb=False):
sr_orig = get_sr(4326)
sr_dest = get_sr(area_srid)
for gid,geom in zip(self.gid,self.geometry):
if keep_geom:
d = OrderedDict(zip(
['GID','AREA_M2','geometry'],
[int(gid),get_area(geom,sr_orig,sr_dest),geom]))
else:
d = OrderedDict(zip(
['GID','AREA_M2'],
[int(gid),get_area(geom,sr_orig,sr_dest)]))
if wkt:
d.update(dict(WKT=geom.wkt))
if wkb:
d.update(dict(WKB=geom.wkb))
yield(d)
def _range_(self,idx):
try:
return(range(self.value.shape[idx]))
except IndexError:
return([])
@property
def dim_time(self):
return(self._range_(0))
@property
def dim_level(self):
return(self._range_(1))
@property
def dim_data(self):
return(self._range_(2))
@property
def area(self):
"""
Return the object's untransformed geometric area.
"""
area = 0.0
for geom in self.geometry:
area += geom.area
return(area)
def clip(self,igeom):
"""
Clip the object to the extent of a geometry.
igeom (shapely.Polygon or shapely.MultiPolygon) -- The geometric extent
to clip the object to.
"""
prep_igeom = prepared.prep(igeom)
for ii,geom in enumerate(self.geometry):
if keep(prep_igeom,igeom,geom):
new_geom = igeom.intersection(geom)
weight = new_geom.area/geom.area
assert(weight != 0.0) #tdk
self.weight[ii] = weight
self.geometry[ii] = new_geom
@timing
def select_values(self,igeom=None,clip=False):
## if an intersection geometry is passed, use it to calculate the weights
## but do not modify the geometry. this weight is used to select values
## to keep for set statistics.
## this is the case of no intersection geometry. basically, requesting
## the entire dataset.
if clip and igeom is None:
            mask = np.zeros(self.value.shape)
elif clip and igeom is not None:
prep_igeom = prepared.prep(igeom)
for ii,geom in enumerate(self.geometry):
if keep(prep_igeom,igeom,geom):
new_geom = igeom.intersection(geom)
weight = new_geom.area/geom.area
assert(weight != 0.0) #tdk
self.weight[ii] = weight
## it has now been clipped
clip = False
if not clip:
## loop through the weights determining which values to maintain based
## on weights.
idx = []
for ii,weight in enumerate(self.weight):
if weight > 0.5:
idx.append(ii)
elif weight == 0.5:
warn('0.5 weight encountered. Removing it.')
## select the data and store in special variable for use by set statistics
mask = np.ones(self.value.shape)
mask[:,:,idx] = 0
self.value_set = np.ma.masked_array(self.value,mask=mask)
def report_shape(self):
for attr in self.__attrs__:
rattr = getattr(self,attr)
msg = '{0}={1}'.format(attr,getattr(rattr,'shape'))
print(msg)
def union(self):
"""
Union the object's geometries and return an area-weighted sum of its
values.
"""
self._union_geom_()
self._union_sum_()
def union_nosum(self):
"""
Union the geometries WITHOUT area-weighting its values.
"""
self._union_geom_()
def _union_geom_(self):
"""
Union the object's geometries.
"""
        ## just using np.array() on a multipolygon object
## results in a (1,n) array of polygons.
new_geometry = np.array([None],dtype=object)
new_geometry[0] = cascaded_union(self.geometry)
self.geometry = new_geometry
def _union_sum_(self):
"""
Area-weight the object's values. No geometric transformation.
"""
self.value = union_sum(self.weight,self.value,normalize=True)
self.gid = np.array([1])
def display(self,show=True,overlays=None):
import matplotlib.pyplot as plt
from descartes.patch import PolygonPatch
ax = plt.axes()
x = []
y = []
for geom in self.geometry:
if isinstance(geom,MultiPolygon):
for geom2 in geom:
try:
ax.add_patch(PolygonPatch(geom2,alpha=0.5))
except:
geom2 = wkt.loads(geom2.wkt)
ax.add_patch(PolygonPatch(geom2,alpha=0.5))
ct = geom2.centroid
x.append(ct.x)
y.append(ct.y)
else:
ax.add_patch(PolygonPatch(geom,alpha=0.5))
ct = geom.centroid
x.append(ct.x)
y.append(ct.y)
if overlays is not None:
for geom in overlays:
ax.add_patch(PolygonPatch(geom,alpha=0.5,fc='#999999'))
ax.scatter(x,y,alpha=1.0)
if show: plt.show()
|
joone/chromium-crosswalk
|
refs/heads/2016.04.css-round-display-edtior-draft-1
|
tools/site_compare/scrapers/ie/__init__.py
|
179
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Selects the appropriate scraper for Internet Explorer."""
def GetScraper(version):
"""Returns the scraper module for the given version.
Args:
version: version string of IE, or None for most recent
Returns:
scrape module for given version
"""
# Pychecker will warn that the parameter is unused; we only
# support one version of IE at this time
# We only have one version of the IE scraper for now
return __import__("ie7", globals(), locals(), [''])
# if invoked rather than imported, test
if __name__ == "__main__":
print GetScraper("7.0.5370.1").version
|
colinnewell/odoo
|
refs/heads/8.0
|
addons/website/tests/test_views.py
|
144
|
# -*- coding: utf-8 -*-
import itertools
import unittest2
from lxml import etree as ET, html
from lxml.html import builder as h
from openerp.tests import common
def attrs(**kwargs):
return dict(('data-oe-%s' % key, str(value)) for key, value in kwargs.iteritems())
class TestViewSaving(common.TransactionCase):
def eq(self, a, b):
self.assertEqual(a.tag, b.tag)
self.assertEqual(a.attrib, b.attrib)
self.assertEqual((a.text or '').strip(), (b.text or '').strip())
self.assertEqual((a.tail or '').strip(), (b.tail or '').strip())
for ca, cb in itertools.izip_longest(a, b):
self.eq(ca, cb)
def setUp(self):
super(TestViewSaving, self).setUp()
self.arch = h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
)
self.view_id = self.registry('ir.ui.view').create(self.cr, self.uid, {
'name': "Test View",
'type': 'qweb',
'arch': ET.tostring(self.arch, encoding='utf-8').decode('utf-8')
})
def test_embedded_extraction(self):
fields = self.registry('ir.ui.view').extract_embedded_fields(
self.cr, self.uid, self.arch, context=None)
expect = [
h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char')),
h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')),
]
for actual, expected in itertools.izip_longest(fields, expect):
self.eq(actual, expected)
def test_embedded_save(self):
embedded = h.SPAN("+00 00 000 00 0 000", attrs(
model='res.company', id=1, field='phone', type='char'))
self.registry('ir.ui.view').save_embedded_field(self.cr, self.uid, embedded)
company = self.registry('res.company').browse(self.cr, self.uid, 1)
self.assertEqual(company.phone, "+00 00 000 00 0 000")
@unittest2.skip("save conflict for embedded (saved by third party or previous version in page) not implemented")
def test_embedded_conflict(self):
e1 = h.SPAN("My Company", attrs(model='res.company', id=1, field='name'))
e2 = h.SPAN("Leeroy Jenkins", attrs(model='res.company', id=1, field='name'))
View = self.registry('ir.ui.view')
View.save_embedded_field(self.cr, self.uid, e1)
# FIXME: more precise exception
with self.assertRaises(Exception):
View.save_embedded_field(self.cr, self.uid, e2)
def test_embedded_to_field_ref(self):
View = self.registry('ir.ui.view')
embedded = h.SPAN("My Company", attrs(expression="bob"))
self.eq(
View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob'})
)
def test_to_field_ref_keep_attributes(self):
View = self.registry('ir.ui.view')
att = attrs(expression="bob", model="res.company", id=1, field="name")
att['id'] = "whop"
att['class'] = "foo bar"
embedded = h.SPAN("My Company", att)
self.eq(View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob', 'class': 'foo bar', 'id': 'whop'}))
def test_replace_arch(self):
replacement = h.P("Wheee")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, h.DIV("Wheee"))
def test_replace_arch_2(self):
replacement = h.DIV(h.P("Wheee"))
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, replacement)
def test_fixup_arch(self):
replacement = h.H1("I am the greatest title alive!")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div[1]/h3',
replacement)
self.eq(result, h.DIV(
h.DIV(
h.H3("I am the greatest title alive!"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
))
def test_multiple_xpath_matches(self):
with self.assertRaises(ValueError):
self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div/h3',
h.H6("Lol nope"))
def test_save(self):
Company = self.registry('res.company')
View = self.registry('ir.ui.view')
replacement = ET.tostring(h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN("Acme Corporation", attrs(model='res.company', id=1, field='name', expression="bob", type='char'))),
h.LI(h.SPAN("+12 3456789", attrs(model='res.company', id=1, field='phone', expression="edmund", type='char'))),
)
), encoding='utf-8')
View.save(self.cr, self.uid, res_id=self.view_id, value=replacement,
xpath='/div/div[2]')
company = Company.browse(self.cr, self.uid, 1)
self.assertEqual(company.name, "Acme Corporation")
self.assertEqual(company.phone, "+12 3456789")
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN({'t-field': "bob"})),
h.LI(h.SPAN({'t-field': "edmund"}))
))
)
)
def test_save_escaped_text(self):
""" Test saving html special chars in text nodes """
view_id = self.registry('ir.ui.view').create(self.cr, self.uid, {
'arch':'<t><p><h1>hello world</h1></p></t>',
'type':'qweb'
})
view = self.registry('ir.ui.view').browse(self.cr, self.uid, view_id)
        # script and style text nodes should not be escaped client side
replacement = '<script>1 && "hello & world"</script>'
view.save(replacement, xpath='/t/p/h1')
self.assertIn(
            replacement.replace('&', '&amp;'),
view.arch,
'inline script should be escaped server side'
)
self.assertIn(
replacement,
view.render(),
'inline script should not be escaped when rendering'
)
        # common text nodes should be escaped client side
replacement = 'world &amp; &lt;b&gt;cie'
view.save(replacement, xpath='/t/p')
self.assertIn(replacement, view.arch, 'common text node should not be escaped server side')
self.assertIn(
replacement,
            view.render().replace('&amp;', '&'),
'text node characters wrongly unescaped when rendering'
)
def test_save_only_embedded(self):
Company = self.registry('res.company')
company_id = 1
Company.write(self.cr, self.uid, company_id, {'name': "Foo Corporation"})
node = html.tostring(h.SPAN(
"Acme Corporation",
attrs(model='res.company', id=company_id, field="name", expression='bob', type='char')))
self.registry('ir.ui.view').save(self.cr, self.uid, res_id=company_id,value=node)
company = Company.browse(self.cr, self.uid, company_id)
self.assertEqual(company.name, "Acme Corporation")
def test_field_tail(self):
View = self.registry('ir.ui.view')
replacement = ET.tostring(
h.LI(h.SPAN("+12 3456789", attrs(
model='res.company', id=1, type='char',
field='phone', expression="edmund")),
"whop whop"
), encoding="utf-8")
View.save(self.cr, self.uid, res_id = self.view_id, value=replacement,
xpath='/div/div[2]/ul/li[3]')
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN({'t-field': "edmund"}), "whop whop"),
))
)
)
|
was4444/chromium.src
|
refs/heads/nw15
|
third_party/libxslt/win32/runtests.py
|
1
|
import io
import os
import sys
import difflib
from os import path
from subprocess import Popen, PIPE
xsltproc = path.join(os.getcwd(), "win32", "bin.msvc", "xsltproc.exe")
if not path.isfile(xsltproc):
raise FileNotFoundError(xsltproc)
def runtests(xsl_dir, xml_dir="."):
old_dir = os.getcwd()
os.chdir(xsl_dir)
for xsl_file in os.listdir():
if not xsl_file.endswith(".xsl"):
continue
xsl_path = "./" + xsl_file
name = path.splitext(xsl_file)[0]
xml_path = path.join(xml_dir + "/" + name + ".xml")
if not path.isfile(xml_path):
continue
args = [ xsltproc, xsl_path, xml_path ]
p = Popen(args, stdout=PIPE, stderr=PIPE)
out_path = path.join(xml_dir, name + ".out")
err_path = path.join(xml_dir, name + ".err")
out_diff = diff(p.stdout, "<stdout>", name + ".out")
err_diff = diff(p.stderr, "<stderr>", name + ".err")
if (len(out_diff) or len(err_diff)):
sys.stdout.writelines(out_diff)
sys.stdout.writelines(err_diff)
print()
os.chdir(old_dir)
def diff(got_stream, got_name, expected_path):
text_stream = io.TextIOWrapper(got_stream, encoding="latin_1")
got_lines = text_stream.readlines()
if path.isfile(expected_path):
file = open(expected_path, "r", encoding="latin_1")
expected_lines = file.readlines()
else:
expected_lines = []
diff = difflib.unified_diff(expected_lines, got_lines,
fromfile=expected_path,
tofile=got_name)
return list(diff)
print("## Running REC tests")
runtests("tests/REC")
print("## Running general tests")
runtests("tests/general", "./../docs")
print("## Running exslt common tests")
runtests("tests/exslt/common")
print("## Running exslt functions tests")
runtests("tests/exslt/functions")
print("## Running exslt math tests")
runtests("tests/exslt/math")
print("## Running exslt sets tests")
runtests("tests/exslt/sets")
print("## Running exslt strings tests")
runtests("tests/exslt/strings")
print("## Running exslt dynamic tests")
runtests("tests/exslt/dynamic")
print("## Running exslt date tests")
runtests("tests/exslt/date")
|
merzlyakov-me/merzlyakov-me-blog
|
refs/heads/master
|
merzlyakov/seo/models.py
|
1
|
from django.db import models
class PostExcerpt(models.Model):
"""
Excerpt to generate meta description
"""
class Meta:
abstract = True
def verify_title_seo(self):
title_length = len(self.title)
title_words = len(self.title.split(' '))
error_msgs = []
if title_length > 65:
self.title_seo = False
error_msgs.append("Title is more than 65 chars long.")
elif title_words > 8:
pass # TODO: add SEO checks
|
andaluri/rootio_web
|
refs/heads/master
|
scheduler/test_scripts/env.py
|
7
|
import re, os, sys
def read_env(from_file):
"""Read and set environment variables from a file
https://gist.github.com/bennylope/2999704"""
try:
with open(from_file) as f:
content = f.read()
except IOError:
print "IOError: unable to open",from_file
sys.exit(-1)
new_env = {}
for line in content.splitlines():
m1 = re.match(r'\A([A-Za-z_0-9.:]+)=(.*)\Z', line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
os.environ.setdefault(key, val)
new_env[key]=val
return new_env
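# Illustrative .env file this parser understands (hypothetical content):
#
#   RADIO_HOST=localhost
#   RADIO_PASS='s3cret'
#
# read_env('.env') would set both variables via os.environ.setdefault (so
# values already present in the environment are not overridden) and return
# them in a dict.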
|
fuselock/odoo
|
refs/heads/8.0
|
openerp/tools/misc.py
|
62
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping, OrderedDict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
try:
return which(name, path=os.pathsep.join(path))
except IOError:
return None
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
""" On systems where pg_restore/pg_dump require an explicit password (i.e.
on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if not env.get('PGPASSWORD') and openerp.tools.config['db_password']:
env['PGPASSWORD'] = openerp.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
>>> file_open('hr/report/timesheer.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or has been deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
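# Worked example for topological_sort: with elems = {'a': ['b'], 'b': [],
# 'c': ['a']} the result is ['b', 'a', 'c'] -- each key appears only after
# all of its dependencies have been emitted.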
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
    return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
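# Examples: get_iso_codes('fr_FR') collapses to 'fr' because the country part
# merely repeats the language code, while get_iso_codes('fr_BE') is kept as-is.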
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'ka_GE': u'Georgian / ქართული ენა',
'kab_DZ': u'Kabyle / Taqbaylit',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Dutch (BE) / Nederlands (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
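# Worked example: mod10r('12000') walks the codec table digit by digit and
# appends the resulting check digit, giving '120006'.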
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
    for binding to an interface, but it could be used as a basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
        tz_value = False
        f = None
        try:
            f = open("/etc/timezone")
            tz_value = f.read(128).strip()
        except Exception:
            pass
        finally:
            if f:
                f.close()
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
    #   there is no tzinfo in a datetime value (e.g. when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
    # see comments above, and babel's format_datetime assumes a UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
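# Example sketch (added for illustration; assumes babel is importable, as it is
# in the code that consumes this helper):
# >>> from babel import Locale
# >>> posix_to_ldml('%Y-%m-%d %H:%M:%S', locale=Locale('en'))
# 'yyyy-MM-dd HH:mm:ss'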
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
                 date fields directly; these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
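# Usage sketch (illustrative values; requires pytz for the offset to be applied,
# and assumes 'Europe/Brussels' resolves to UTC+1 for this winter date):
# server_to_local_timestamp('2013-02-08 10:00:00',
#                           DEFAULT_SERVER_DATETIME_FORMAT,
#                           DEFAULT_SERVER_DATETIME_FORMAT,
#                           'Europe/Brussels')
# -> '2013-02-08 11:00:00'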
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
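# Example sketch (added for illustration):
# >>> list(split_every(2, range(5)))
# [(0, 1), (2, 3), (4,)]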
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
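# Example sketch (illustrative objects, not part of the original module):
# class _Obj(object): pass
# o = _Obj(); o.child = _Obj(); o.child.name = 'x'
# resolve_attr(o, 'child.name')          # -> 'x'
# attrgetter('child.name', 'child')(o)   # -> ('x', o.child)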
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
    or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
        do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
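# Usage sketch (illustrative): the index starts before the first element and
# tracks each yielded item; this class uses the Python 2 iteration protocol
# (a ``next`` method).
# stream = CountingStream(iter('ab'))
# stream.next()   # -> 'a', stream.index == 0
# stream.next()   # -> 'b', stream.index == 1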
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
    Useful as a default value for methods.
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
        same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
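# Example sketch (added for illustration):
# defaults = ConstantMapping(False)
# defaults['any key']   # -> False
# len(defaults)         # -> 0, by design (see __len__ above)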
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
class OrderedSet(OrderedDict):
""" A simple collection that remembers the elements insertion order. """
def __init__(self, seq=()):
super(OrderedSet, self).__init__([(x, None) for x in seq])
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
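# Example sketch (added for illustration): duplicates collapse and insertion
# order is preserved.
# s = OrderedSet(['b', 'a', 'b'])
# list(s)          # -> ['b', 'a']
# s.add('c'); s.discard('a')
# list(s)          # -> ['b', 'c']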
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
joernhees/scikit-learn
|
refs/heads/master
|
sklearn/manifold/mds.py
|
20
|
"""
Multi-dimensional Scaling (MDS)
"""
# author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
import numpy as np
import warnings
from ..base import BaseEstimator
from ..metrics import euclidean_distances
from ..utils import check_random_state, check_array, check_symmetric
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..isotonic import IsotonicRegression
def _smacof_single(dissimilarities, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None):
"""Computes multidimensional scaling using SMACOF algorithm
Parameters
----------
dissimilarities : ndarray, shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : boolean, optional, default: True
Compute metric or nonmetric SMACOF algorithm.
n_components : int, optional, default: 2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, optional, default: 0
Level of verbosity.
eps : float, optional, default: 1e-3
Relative tolerance with respect to stress at which to declare
convergence.
random_state : int, RandomState instance or None, optional, default: None
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Returns
-------
X : ndarray, shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
        The final value of the stress (sum of squared differences between the
disparities and the distances for all constrained points).
n_iter : int
The number of iterations corresponding to the best stress.
"""
dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
n_samples = dissimilarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = dissimilarities
else:
dis_flat = dis.ravel()
# dissimilarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
# Update X using the Guttman transform
dis[dis == 0] = 1e-5
ratio = disparities / dis
B = - ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1. / n_samples * np.dot(B, X)
dis = np.sqrt((X ** 2).sum(axis=1)).sum()
if verbose >= 2:
print('it: %d, stress %s' % (it, stress))
if old_stress is not None:
if(old_stress - stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it,
stress))
break
old_stress = stress / dis
return X, stress, it + 1
def smacof(dissimilarities, metric=True, n_components=2, init=None, n_init=8,
n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None,
return_n_iter=False):
"""Computes multidimensional scaling using the SMACOF algorithm.
The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
multidimensional scaling algorithm which minimizes an objective function
(the *stress*) using a majorization technique. Stress majorization, also
known as the Guttman Transform, guarantees a monotone convergence of
stress, and is more powerful than traditional techniques such as gradient
descent.
    The SMACOF algorithm for metric MDS can be summarized by the following steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
dissimilarities : ndarray, shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : boolean, optional, default: True
Compute metric or nonmetric SMACOF algorithm.
n_components : int, optional, default: 2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
n_init : int, optional, default: 8
Number of times the SMACOF algorithm will be run with different
initializations. The final results will be the best output of the runs,
determined by the run with the smallest final stress. If ``init`` is
provided, this option is overridden and a single run is performed.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If multiple
initializations are used (``n_init``), each run of the algorithm is
computed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For ``n_jobs`` below -1,
(``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs
but one are used.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, optional, default: 0
Level of verbosity.
eps : float, optional, default: 1e-3
Relative tolerance with respect to stress at which to declare
convergence.
random_state : int, RandomState instance or None, optional, default: None
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
return_n_iter : bool, optional, default: False
Whether or not to return the number of iterations.
Returns
-------
X : ndarray, shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
        The final value of the stress (sum of squared differences between the
disparities and the distances for all constrained points).
n_iter : int
The number of iterations corresponding to the best stress. Returned
only if ``return_n_iter`` is set to ``True``.
Notes
-----
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
dissimilarities = check_array(dissimilarities)
random_state = check_random_state(random_state)
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
'Explicit initial positions passed: '
'performing only one init of the MDS instead of %d'
% n_init)
n_init = 1
best_pos, best_stress = None, None
if n_jobs == 1:
for it in range(n_init):
pos, stress, n_iter_ = _smacof_single(
dissimilarities, metric=metric,
n_components=n_components, init=init,
max_iter=max_iter, verbose=verbose,
eps=eps, random_state=random_state)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(_smacof_single)(
dissimilarities, metric=metric, n_components=n_components,
init=init, max_iter=max_iter, verbose=verbose, eps=eps,
random_state=seed)
for seed in seeds)
positions, stress, n_iters = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if return_n_iter:
return best_pos, best_stress, best_iter
else:
return best_pos, best_stress
class MDS(BaseEstimator):
"""Multidimensional scaling
Read more in the :ref:`User Guide <multidimensional_scaling>`.
Parameters
----------
n_components : int, optional, default: 2
Number of dimensions in which to immerse the dissimilarities.
metric : boolean, optional, default: True
If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.
n_init : int, optional, default: 4
Number of times the SMACOF algorithm will be run with different
initializations. The final results will be the best output of the runs,
determined by the run with the smallest final stress.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, optional, default: 0
Level of verbosity.
eps : float, optional, default: 1e-3
Relative tolerance with respect to stress at which to declare
convergence.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If multiple
initializations are used (``n_init``), each run of the algorithm is
computed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For ``n_jobs`` below -1,
(``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs
but one are used.
random_state : int, RandomState instance or None, optional, default: None
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
dissimilarity : 'euclidean' | 'precomputed', optional, default: 'euclidean'
Dissimilarity measure to use:
- 'euclidean':
Pairwise Euclidean distances between points in the dataset.
- 'precomputed':
Pre-computed dissimilarities are passed directly to ``fit`` and
``fit_transform``.
Attributes
----------
embedding_ : array-like, shape (n_components, n_samples)
Stores the position of the dataset in the embedding space.
stress_ : float
        The final value of the stress (sum of squared differences between the
disparities and the distances for all constrained points).
References
----------
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
def __init__(self, n_components=2, metric=True, n_init=4,
max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
random_state=None, dissimilarity="euclidean"):
self.n_components = n_components
self.dissimilarity = dissimilarity
self.metric = metric
self.n_init = n_init
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, init=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
        init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
"""
self.fit_transform(X, init=init)
return self
def fit_transform(self, X, y=None, init=None):
"""
Fit the data from X, and returns the embedded coordinates
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
        init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
"""
X = check_array(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn("The MDS API has changed. ``fit`` now constructs an"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity='precomputed'``.")
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
else:
raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
" Got %s instead" % str(self.dissimilarity))
self.embedding_, self.stress_, self.n_iter_ = smacof(
self.dissimilarity_matrix_, metric=self.metric,
n_components=self.n_components, init=init, n_init=self.n_init,
n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
eps=self.eps, random_state=self.random_state,
return_n_iter=True)
return self.embedding_
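# Minimal usage sketch (not part of the original module; the data is illustrative):
# import numpy as np
# X = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
# mds = MDS(n_components=2, random_state=0)
# embedding = mds.fit_transform(X)   # array of shape (4, 2)
# mds.stress_                        # final value of the stress for the best run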
|
intirix/LedManager
|
refs/heads/master
|
Pi/bin/SendCommand.py
|
1
|
#!/usr/bin/python
import serial
import os
import sys
dev = "/dev/ttyACM0"
if not os.path.exists( dev ):
dev = "/dev/ttyACM1"
if not os.path.exists( dev ):
print( "Could not find a device to use" )
sys.exit(1)
print( "Using device " + dev )
ser = serial.Serial( dev )
cmd = ' '.join( sys.argv[ 1 : ] )
print( "Sending: " + cmd )
ser.write( cmd + "\n" )
out = ser.readline()
print( "> " + out )
print( "Done." )
|
benjamindeleener/odoo
|
refs/heads/master
|
addons/account_budget/wizard/account_budget_report.py
|
47
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
class account_budget_report(osv.osv_memory):
_name = "account.budget.report"
_description = "Account Budget report for analytic account"
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'account.budget.post',
'form': data
}
datas['form']['ids'] = datas['ids']
datas['form']['report'] = 'analytic-full'
return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_budget', data=datas, context=context)
|
kieferbonk/xbmc-finnish-tv
|
refs/heads/master
|
plugin.video.yleareena/win32/Crypto/SelfTest/Protocol/test_rfc1751.py
|
132
|
#
# Test script for Crypto.Util.RFC1751.
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import binascii
import unittest
from Crypto.Util import RFC1751
from Crypto.Util.py3compat import *
test_data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
('CCAC2AED591056BE4F90FD441C534766',
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
('EFF81F9BFBC65350920CDD7416DE8009',
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
]
class RFC1751Test_k2e (unittest.TestCase):
def runTest (self):
"Check converting keys to English"
for key, words in test_data:
key=binascii.a2b_hex(b(key))
self.assertEqual(RFC1751.key_to_english(key), words)
class RFC1751Test_e2k (unittest.TestCase):
def runTest (self):
"Check converting English strings to keys"
for key, words in test_data:
key=binascii.a2b_hex(b(key))
self.assertEqual(RFC1751.english_to_key(words), key)
# class RFC1751Test
def get_tests(config={}):
return [RFC1751Test_k2e(), RFC1751Test_e2k()]
if __name__ == "__main__":
unittest.main()
|
takeflight/wagtail
|
refs/heads/master
|
wagtail/search/tests/test_related_fields.py
|
24
|
from django.test import TestCase
from wagtail.search import index
from wagtail.tests.search.models import Book, Novel
from wagtail.tests.testapp.models import Advert, ManyToManyBlogPage
class TestSelectOnQuerySet(TestCase):
def test_select_on_queryset_with_foreign_key(self):
fields = index.RelatedFields('protagonist', [
index.SearchField('name'),
])
queryset = fields.select_on_queryset(Novel.objects.all())
# ForeignKey should be select_related
self.assertFalse(queryset._prefetch_related_lookups)
self.assertIn('protagonist', queryset.query.select_related)
def test_select_on_queryset_with_one_to_one(self):
fields = index.RelatedFields('book_ptr', [
index.SearchField('title'),
])
queryset = fields.select_on_queryset(Novel.objects.all())
# OneToOneField should be select_related
self.assertFalse(queryset._prefetch_related_lookups)
self.assertIn('book_ptr', queryset.query.select_related)
def test_select_on_queryset_with_many_to_many(self):
fields = index.RelatedFields('adverts', [
index.SearchField('title'),
])
queryset = fields.select_on_queryset(ManyToManyBlogPage.objects.all())
# ManyToManyField should be prefetch_related
self.assertIn('adverts', queryset._prefetch_related_lookups)
self.assertFalse(queryset.query.select_related)
def test_select_on_queryset_with_reverse_foreign_key(self):
fields = index.RelatedFields('categories', [
index.RelatedFields('category', [
index.SearchField('name')
])
])
queryset = fields.select_on_queryset(ManyToManyBlogPage.objects.all())
# reverse ForeignKey should be prefetch_related
self.assertIn('categories', queryset._prefetch_related_lookups)
self.assertFalse(queryset.query.select_related)
def test_select_on_queryset_with_reverse_one_to_one(self):
fields = index.RelatedFields('novel', [
index.SearchField('subtitle'),
])
queryset = fields.select_on_queryset(Book.objects.all())
# reverse OneToOneField should be select_related
self.assertFalse(queryset._prefetch_related_lookups)
self.assertIn('novel', queryset.query.select_related)
def test_select_on_queryset_with_reverse_many_to_many(self):
fields = index.RelatedFields('manytomanyblogpage', [
index.SearchField('title'),
])
queryset = fields.select_on_queryset(Advert.objects.all())
# reverse ManyToManyField should be prefetch_related
self.assertIn('manytomanyblogpage', queryset._prefetch_related_lookups)
self.assertFalse(queryset.query.select_related)
def test_select_on_queryset_with_taggable_manager(self):
fields = index.RelatedFields('tags', [
index.SearchField('name'),
])
queryset = fields.select_on_queryset(Novel.objects.all())
# Tags should be prefetch_related
self.assertIn('tags', queryset._prefetch_related_lookups)
self.assertFalse(queryset.query.select_related)
|
sarantapichos/faircoop-market
|
refs/heads/master
|
addons/base_import/test_models.py
|
399
|
from openerp.osv import orm, fields
def name(n): return 'base_import.tests.models.%s' % n
class char(orm.Model):
_name = name('char')
_columns = {
'value': fields.char('unknown')
}
class char_required(orm.Model):
_name = name('char.required')
_columns = {
'value': fields.char('unknown', required=True)
}
class char_readonly(orm.Model):
_name = name('char.readonly')
_columns = {
'value': fields.char('unknown', readonly=True)
}
class char_states(orm.Model):
_name = name('char.states')
_columns = {
'value': fields.char('unknown', readonly=True, states={'draft': [('readonly', False)]})
}
class char_noreadonly(orm.Model):
_name = name('char.noreadonly')
_columns = {
'value': fields.char('unknown', readonly=True, states={'draft': [('invisible', True)]})
}
class char_stillreadonly(orm.Model):
_name = name('char.stillreadonly')
_columns = {
'value': fields.char('unknown', readonly=True, states={'draft': [('readonly', True)]})
}
# TODO: complex field (m2m, o2m, m2o)
class m2o(orm.Model):
_name = name('m2o')
_columns = {
'value': fields.many2one(name('m2o.related'))
}
class m2o_related(orm.Model):
_name = name('m2o.related')
_columns = {
'value': fields.integer()
}
_defaults = {
'value': 42
}
class m2o_required(orm.Model):
_name = name('m2o.required')
_columns = {
'value': fields.many2one(name('m2o.required.related'), required=True)
}
class m2o_required_related(orm.Model):
_name = name('m2o.required.related')
_columns = {
'value': fields.integer()
}
_defaults = {
'value': 42
}
class o2m(orm.Model):
_name = name('o2m')
_columns = {
'value': fields.one2many(name('o2m.child'), 'parent_id')
}
class o2m_child(orm.Model):
_name = name('o2m.child')
_columns = {
'parent_id': fields.many2one(name('o2m')),
'value': fields.integer()
}
class preview_model(orm.Model):
_name = name('preview')
_columns = {
'name': fields.char('Name'),
'somevalue': fields.integer('Some Value', required=True),
'othervalue': fields.integer('Other Variable'),
}
|
pyQode/pyqode.python
|
refs/heads/master
|
examples/modes/calltips.py
|
3
|
"""
Minimal example showing the use of the CalltipsMode.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.api import CodeEdit
from pyqode.python.backend import server
from pyqode.python.modes import CalltipsMode
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
editor = CodeEdit()
editor.backend.start(server.__file__)
editor.resize(800, 600)
print(editor.modes.append(CalltipsMode()))
editor.show()
editor.appendPlainText(
'import os\nos.path.join')
app.exec_()
editor.close()
del editor
del app
|
SpaceVim/SpaceVim
|
refs/heads/master
|
bundle/defx.nvim/rplugin/python3/defx/column/__init__.py
|
12133432
| |
vsporeddy/bigbang
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
edx/lettuce
|
refs/heads/master
|
tests/integration/django/cucumber/settings.py
|
20
|
# Django settings for cucumber project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Make this unique, and don't share it with anybody.
SECRET_KEY = '3c=9-_@gug3+!j*o*b$1!g8e7037(ghrns8pqygog1gs1f^zqu'
ROOT_URLCONF = 'cucumber.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
INSTALLED_APPS = (
'lettuce.django',
'first',
'second',
)
|
ElementalAlchemist/txircd
|
refs/heads/master
|
txircd/modules/extra/sapart.py
|
1
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import ICommand, IModuleData, Command, ModuleData
from zope.interface import implements
class SapartCommand(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "SapartCommand"
def userCommands(self):
return [ ("SAPART", 1, self) ]
def actions(self):
return [ ("commandpermission-SAPART", 1, self.restrictToOpers) ]
def restrictToOpers(self, user, data):
if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-sapart", users=[user]):
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
return False
return None
def parseParams(self, user, params, prefix, tags):
if len(params) < 2:
user.sendSingleError("SapartCmd", irc.ERR_NEEDMOREPARAMS, "SAPART", "Not enough parameters")
return None
if params[0] not in self.ircd.userNicks:
user.sendSingleError("SapartCmd", irc.ERR_NOSUCHNICK, params[0], "No such nick")
return None
if params[1] not in self.ircd.channels:
user.sendSingleError("SapartCmd", irc.ERR_NOSUCHCHANNEL, params[1], "No such channel")
return None
target = self.ircd.users[self.ircd.userNicks[params[0]]]
channel = self.ircd.channels[params[1]]
if target not in channel.users:
user.sendSingleError("SapartCmd", irc.ERR_USERNOTINCHANNEL, params[1], "They are not on that channel")
return None
reason = " ".join(params[2:]) if len(params) > 2 else ""
reason = reason[:self.ircd.config.get("part_message_length", 300)]
return {
"target": target,
"channel": channel,
"reason": reason
}
def affectedChannels(self, source, data):
return [ data["channel"] ]
def affectedUsers(self, source, data):
return [ data["target"] ]
def execute(self, user, data):
target = data["target"]
channel = data["channel"]
reason = data["reason"]
target.leaveChannel(channel, "PART", { "reason": reason })
self.ircd.log.info("User {user.uuid} ({user.nick}) forcibly part user {targetUser.uuid} ({targetUser.nick}) from channel {channel.name}", user=user, targetUser=target, channel=channel)
return True
sapart = SapartCommand()
|
JackGavin13/octoprint-test-not-finished
|
refs/heads/master
|
src/octoprint/util/platform/__init__.py
|
1
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2017 The OctoPrint Project - Released under terms of the AGPLv3 License"
import sys
try:
import fcntl
except ImportError:
fcntl = None
# set_close_exec
if fcntl is not None and hasattr(fcntl, "FD_CLOEXEC"):
def set_close_exec(handle):
flags = fcntl.fcntl(handle, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(handle, fcntl.F_SETFD, flags)
elif sys.platform == "win32":
def set_close_exec(handle):
import ctypes
import ctypes.wintypes
# see https://msdn.microsoft.com/en-us/library/ms724935(v=vs.85).aspx
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
SetHandleInformation.restype = ctypes.c_bool
HANDLE_FLAG_INHERIT = 0x00000001
result = SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0)
if not result:
raise ctypes.GetLastError()
else:
def set_close_exec(handle):
# no-op
pass
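# Usage sketch (illustrative; handle semantics differ between the POSIX and
# win32 branches above):
# import socket
# s = socket.socket()
# set_close_exec(s.fileno())  # child processes spawned later won't inherit the handle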
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/operations/_service_tags_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceTagsOperations(object):
"""ServiceTagsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceTagsListResult"
"""Gets a list of service tag information resources.
:param location: The location that will be used as a reference for version (not as a filter
based on location, you will get the list of service tags with prefix details across all regions
but limited to the cloud that your subscription belongs to).
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceTagsListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.ServiceTagsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceTagsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceTagsListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/serviceTags'} # type: ignore
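# Usage sketch (illustrative; assumes the usual client wiring via
# azure.mgmt.network.NetworkManagementClient and azure-identity, neither of
# which is part of this module):
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.network import NetworkManagementClient
# client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
# result = client.service_tags.list("westus2")  # a ServiceTagsListResult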
|
fengbeihong/tempest_automate_ironic
|
refs/heads/master
|
tempest/api/compute/admin/test_floating_ips_bulk.py
|
3
|
# Copyright 2014 NEC Technologies India Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class FloatingIPsBulkAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Floating IPs Bulk APIs Create, List and Delete that
require admin privileges.
API documentation - http://docs.openstack.org/api/openstack-compute/2/
content/ext-os-floating-ips-bulk.html
"""
@classmethod
def setup_clients(cls):
super(FloatingIPsBulkAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.floating_ips_client
@classmethod
def resource_setup(cls):
super(FloatingIPsBulkAdminTestJSON, cls).resource_setup()
cls.ip_range = CONF.compute.floating_ip_range
cls.verify_unallocated_floating_ip_range(cls.ip_range)
@classmethod
def verify_unallocated_floating_ip_range(cls, ip_range):
# Verify whether configure floating IP range is not already allocated.
body = cls.client.list_floating_ips_bulk()
allocated_ips_list = map(lambda x: x['address'], body)
for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts():
if str(ip_addr) in allocated_ips_list:
msg = ("Configured unallocated floating IP range is already "
"allocated. Configure the correct unallocated range "
"as 'floating_ip_range'")
raise exceptions.InvalidConfiguration(msg)
return
def _delete_floating_ips_bulk(self, ip_range):
try:
self.client.delete_floating_ips_bulk(ip_range)
except Exception:
pass
@test.attr(type='gate')
@test.idempotent_id('2c8f145f-8012-4cb8-ac7e-95a587f0e4ab')
@test.services('network')
def test_create_list_delete_floating_ips_bulk(self):
# Create, List and delete the Floating IPs Bulk
pool = 'test_pool'
# NOTE(GMann): Reserving the IP range but those are not attached
# anywhere. Using the below mentioned interface which is not ever
# expected to be used. Clean Up has been done for created IP range
interface = 'eth0'
body = self.client.create_floating_ips_bulk(self.ip_range,
pool,
interface)
self.addCleanup(self._delete_floating_ips_bulk, self.ip_range)
self.assertEqual(self.ip_range, body['ip_range'])
ips_list = self.client.list_floating_ips_bulk()
self.assertNotEqual(0, len(ips_list))
for ip in netaddr.IPNetwork(self.ip_range).iter_hosts():
self.assertIn(str(ip), map(lambda x: x['address'], ips_list))
body = self.client.delete_floating_ips_bulk(self.ip_range)
self.assertEqual(self.ip_range, body.data)
|
ric2b/Vivaldi-browser
|
refs/heads/master
|
chromium/third_party/blink/web_tests/external/wpt/tools/wptrunner/wptrunner/browsers/edge_webdriver.py
|
31
|
from .base import inherit
from . import edge
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
inherit(edge, globals(), "edge_webdriver")
# __wptrunner__ magically appears from inherit, F821 is undefined name
__wptrunner__["executor"]["testharness"] = "WebDriverTestharnessExecutor" # noqa: F821
__wptrunner__["executor"]["reftest"] = "WebDriverRefTestExecutor" # noqa: F821
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_thicket_small_fog_red.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_thicket_small_fog_red.iff"
result.attribute_template_id = -1
result.stfName("lair_n","thicket")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
Absimpl/Abstream
|
refs/heads/master
|
kivytest_version2_12_windows/kivy/multistroke.py
|
43
|
'''
Multistroke gesture recognizer
==============================
.. versionadded::
1.9.0
.. warning::
This is experimental and subject to change as long as this warning notice
is present.
See :file:`kivy/examples/demo/multistroke/main.py` for a complete application
example.
Conceptual Overview
-------------------
This module implements the Protractor gesture recognition algorithm.
:class:`Recognizer` is the search/database API similar to
:class:`~kivy.gesture.GestureDatabase`. It maintains a list of
:class:`MultistrokeGesture` objects and allows you to search for a
user-input gesture among them.
:class:`ProgressTracker` tracks the progress of a :meth:`Recognizer.recognize`
call. It can be used to interact with the running recognizer task, for example
forcing it to stop half-way, or analyzing results as they arrive.
:class:`MultistrokeGesture` represents a gesture in the gesture database
(:attr:`Recognizer.db`). It is a container for :class:`UnistrokeTemplate`
objects, and implements the heap permute algorithm to automatically generate
all possible stroke orders (if desired).
:class:`UnistrokeTemplate` represents a single stroke path. It's typically
instantiated automatically by :class:`MultistrokeGesture`, but sometimes you
may need to create them manually.
:class:`Candidate` represents a user-input gesture that is used to search
the gesture database for matches. It is normally instantiated automatically
by calling :meth:`Recognizer.recognize`.
Usage examples
--------------
See :file:`kivy/examples/demo/multistroke/main.py` for a complete application
example.
You can bind to events on :class:`Recognizer` to track the state of all
calls to :meth:`Recognizer.recognize`. The callback function will receive an
instance of :class:`ProgressTracker` that can be used to analyze and control
various aspects of the recognition process ::
from kivy.vector import Vector
from kivy.multistroke import Recognizer
gdb = Recognizer()
def search_start(gdb, pt):
print("A search is starting with %d tasks" % (pt.tasks))
def search_stop(gdb, pt):
# This will call max() on the result dictionary, so it's best to store
# it instead of calling it 3 times consecutively
best = pt.best
print("Search ended (%s). Best is %s (score %f, distance %f)" % (
pt.status, best['name'], best['score'], best['dist'] ))
# Bind your callbacks to track all matching operations
gdb.bind(on_search_start=search_start)
gdb.bind(on_search_complete=search_stop)
# The format below is referred to as `strokes`, a list of stroke paths.
# Note that each path shown here consists of two points, ie a straight
# line; if you plot them it looks like a T, hence the name.
gdb.add_gesture('T', [
[Vector(30, 7), Vector(103, 7)],
[Vector(66, 7), Vector(66, 87)]])
# Now you can search for the 'T' gesture using similar data (user input).
# This will trigger both of the callbacks bound above.
gdb.recognize([
[Vector(45, 8), Vector(110, 12)],
[Vector(88, 9), Vector(85, 95)]])
On the next :class:`~kivy.clock.Clock` tick, the matching process starts
(and, in this case, completes).
To track individual calls to :meth:`Recognizer.recognize`, use the return
value (also a :class:`ProgressTracker` instance) ::
# Same as above, but keep track of progress using returned value
progress = gdb.recognize([
[Vector(45, 8), Vector(110, 12)],
[Vector(88, 9), Vector(85, 95)]])
progress.bind(on_progress=my_other_callback)
print(progress.progress) # = 0
# [ assuming a kivy.clock.Clock.tick() here ]
print(progress.progress) # = 1
Algorithm details
-----------------
For more information about the matching algorithm, see:
"Protractor: A fast and accurate gesture recognizer" by Yang Li
http://yangl.org/pdf/protractor-chi2010.pdf
"$N-Protractor" by Lisa Anthony and Jacob O. Wobbrock
http://depts.washington.edu/aimgroup/proj/dollar/ndollar-protractor.pdf
Some of the code is derived from the JavaScript implementation here:
http://depts.washington.edu/aimgroup/proj/dollar/ndollar.html
'''
__all__ = ('Recognizer', 'ProgressTracker', 'MultistrokeGesture',
'UnistrokeTemplate', 'Candidate')
import pickle
import base64
import zlib
from re import match as re_match
from collections import deque
from math import sqrt, pi, radians, acos, atan, atan2, pow, floor
from math import sin as math_sin, cos as math_cos
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.properties import ListProperty
from kivy.compat import PY2
from io import BytesIO
if not PY2:
xrange = range
# Default number of gesture matches per frame
# FIXME: relevant number
DEFAULT_GPF = 10
# Algorithm data
SQUARESIZE = 250.0
ONEDTHRESHOLD = 0.25
ORIGIN = Vector(0, 0)
class MultistrokeError(Exception):
pass
# -----------------------------------------------------------------------------
# Recognizer
# -----------------------------------------------------------------------------
class Recognizer(EventDispatcher):
''':class:`Recognizer` provides a gesture database with matching
facilities.
:Events:
`on_search_start`
Fired when a new search is started using this Recognizer.
`on_search_complete`
Fired when a running search ends, for whatever reason.
(use :data:`ProgressTracker.status` to find out)
:Properties:
`db`
A :class:`ListProperty` that contains the available
:class:`MultistrokeGesture` objects.
:attr:`db` is a
:class:`~kivy.properties.ListProperty` and defaults to []
'''
db = ListProperty([])
def __init__(self, **kwargs):
super(Recognizer, self).__init__(**kwargs)
self.register_event_type('on_search_start')
self.register_event_type('on_search_complete')
def filter(self, **kwargs):
''':meth:`filter` returns a subset of objects in :attr:`self.db`,
according to given criteria. This is used by many other methods of
the :class:`Recognizer`; the arguments below can for example be
used when calling :meth:`Recognizer.recognize` or
:meth:`Recognizer.export_gesture`. You normally don't need to call
this directly.
:Arguments:
`name`
Limits the returned list to gestures where
:attr:`MultistrokeGesture.name` matches given regular
expression(s). If re.match(name, MultistrokeGesture.name)
tests true, the gesture is included in the returned list.
Can be a string or an array of strings ::
gdb = Recognizer()
# Will match all names that start with a capital N
# (ie Next, New, N, Nebraska etc, but not "n" or "next")
gdb.filter(name='N')
# exactly 'N'
gdb.filter(name='N$')
# Nebraska, teletubbies, France, fraggle, N, n, etc
gdb.filter(name=['[Nn]', '(?i)T', '(?i)F'])
`priority`
Limits the returned list to gestures with certain
:attr:`MultistrokeGesture.priority` values. If specified as an
integer, only gestures with a lower priority are returned. If
specified as a list (min/max) ::
# Max priority 50
gdb.filter(priority=50)
# Max priority 50 (same result as above)
gdb.filter(priority=[0, 50])
# Min priority 50, max 100
gdb.filter(priority=[50, 100])
When this option is used, :attr:`Recognizer.db` is automatically
sorted according to priority, incurring extra cost. You can use
`force_priority_sort` to override this behavior if your gestures
are already sorted according to priority.
`orientation_sensitive`
Limits the returned list to gestures that are
orientation sensitive (True), gestures that are not orientation
sensitive (False) or None (ignore template sensitivity, this is
the default).
`numstrokes`
Limits the returned list to gestures that have the specified
number of strokes (in :attr:`MultistrokeGesture.strokes`).
Can be a single integer or a list of integers.
`numpoints`
Limits the returned list to gestures that have specific
:attr:`MultistrokeGesture.numpoints` values. This is provided
for flexibility; do not use it unless you understand what it
does. Can be a single integer or a list of integers.
`force_priority_sort`
Can be used to override the default sort behavior. Normally
:class:`MultistrokeGesture` objects are returned in priority
order if the `priority` option is used. Setting this to True
will return gestures sorted in priority order, False will
return in the order gestures were added. None means decide
automatically (the default).
.. Note ::
For improved performance, you can load your gesture
database in priority order and set this to False when
calling :meth:`Recognizer.recognize`
`db`
Can be set if you want to filter a different list of objects
than :attr:`Recognizer.db`. You probably don't want to do this;
it is used internally by :meth:`import_gesture`.
'''
have_filters = False
kwargs_get = kwargs.get
name = kwargs_get('name', None)
if name is not None:
have_filters = True
if not isinstance(name, list):
name = [name]
priority = kwargs_get('priority', None)
min_p, max_p = None, None
if priority is not None:
have_filters = True
if isinstance(priority, list):
min_p, max_p = priority
elif isinstance(priority, int):
min_p, max_p = None, priority
numstrokes = kwargs_get('numstrokes', None)
if numstrokes is not None:
have_filters = True
if not isinstance(numstrokes, list):
numstrokes = [numstrokes]
numpoints = kwargs_get('numpoints', None)
if numpoints is not None:
have_filters = True
if not isinstance(numpoints, list):
numpoints = [numpoints]
orientation_sens = kwargs_get('orientation_sensitive', None)
if orientation_sens is not None:
have_filters = True
# Prepare a correctly sorted tasklist
force_priority_sort = kwargs.get('force_priority_sort', None)
force_sort_on = force_priority_sort and True
force_sort_off = (force_priority_sort is False) and True
db = kwargs.get('db', None) or self.db
if (force_sort_on or priority) and not force_sort_off:
tasklist = sorted(db, key=lambda n: n.priority)
else:
tasklist = db
# Now test each gesture in the database against filter criteria
out = deque()
if not have_filters:
out.extend(tasklist)
return out
out_append = out.append
for gesture in tasklist:
if (orientation_sens is not None and
orientation_sens != gesture.orientation_sens):
continue
if numpoints and gesture.numpoints not in numpoints:
continue
if numstrokes and len(gesture.strokes) not in numstrokes:
continue
if min_p is not None and gesture.priority < min_p:
continue
if max_p is not None and gesture.priority > max_p:
return out
if name:
for f in name:
if re_match(f, gesture.name):
out_append(gesture)
break
else:
out_append(gesture)
return out
def add_gesture(self, name, strokes, **kwargs):
'''Add a new gesture to the database. This will instantiate a new
:class:`MultistrokeGesture` with `strokes` and append it to self.db.
.. Note ::
If you already have instantiated a :class:`MultistrokeGesture`
object and wish to add it, append it to :attr:`Recognizer.db`
manually.
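A minimal sketch, reusing the strokes format from the module overview
above (the name and coordinates are purely illustrative) ::
gdb = Recognizer()
gdb.add_gesture('T', [
[Vector(30, 7), Vector(103, 7)],
[Vector(66, 7), Vector(66, 87)]])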
'''
if not strokes:
return False
self.db.append(MultistrokeGesture(name=name, strokes=strokes, **kwargs))
return True
def parse_gesture(self, data):
'''Parse data formatted by export_gesture(). Returns a list of
:class:`MultistrokeGesture` objects. This is used internally by
:meth:`import_gesture`, you normally don't need to call this
directly.'''
io = BytesIO(zlib.decompress(base64.b64decode(data)))
p = pickle.Unpickler(io)
multistrokes = []
ms_append = multistrokes.append
for multistroke in p.load():
strokes = multistroke['strokes']
multistroke['strokes'] = [[Vector(
x, y) for x, y in line] for line in strokes]
ms_append(MultistrokeGesture(**multistroke))
return multistrokes
# FIXME: use a try block, maybe shelve or something
def export_gesture(self, filename=None, **kwargs):
'''Export a list of :class:`MultistrokeGesture` objects. Outputs a
base64-encoded string that can be decoded to a Python list with
the :meth:`parse_gesture` function or imported directly to
:attr:`self.db` using :meth:`Recognizer.import_gesture`. If
`filename` is specified, the output is written to disk, otherwise
returned.
This method accepts optional :meth:`Recognizer.filter` arguments.
'''
io = BytesIO()
p = pickle.Pickler(io, protocol=0)
multistrokes = []
defaults = {'priority': 100, 'numpoints': 16, 'stroke_sens': True,
'orientation_sens': False, 'angle_similarity': 30.0}
dkeys = defaults.keys()
for multistroke in self.filter(**kwargs):
m = dict(defaults)
m['name'] = multistroke.name
for attr in dkeys:
m[attr] = getattr(multistroke, attr)
m['strokes'] = tuple([(p.x, p.y) for p in line]
for line in multistroke.strokes)
multistrokes.append(m)
p.dump(multistrokes)
if filename:
f = open(filename, 'wb')
f.write(base64.b64encode(zlib.compress(io.getvalue(), 9)))
f.close()
else:
return base64.b64encode(zlib.compress(io.getvalue(), 9))
# FIXME: match them all with protractor, and don't load exacts? or
# just compare the data or something; seems better to do this on import
# than on every subsequent call to recognize(). And fix it in general,
# too.
def import_gesture(self, data=None, filename=None, **kwargs):
'''Import a list of gestures as formatted by :meth:`export_gesture`.
One of `data` or `filename` must be specified.
This method accepts optional :meth:`Recognizer.filter` arguments,
if none are specified then all gestures in specified data are
imported.'''
if filename is not None:
with open(filename, "rb") as infile:
data = infile.read()
elif data is None:
raise MultistrokeError('import_gesture needs data= or filename=')
new = self.filter(db=self.parse_gesture(data), **kwargs)
if new:
self.db.extend(new)
def transfer_gesture(self, tgt, **kwargs):
'''Transfers :class:`MultistrokeGesture` objects from
:attr:`Recognizer.db` to another :class:`Recognizer` instance `tgt`.
This method accepts optional :meth:`Recognizer.filter` arguments.
'''
if hasattr(tgt, 'db') and isinstance(tgt.db, list):
send = self.filter(**kwargs)
if send:
tgt.db.append(None)
tgt.db[-1:] = send
return True
def prepare_templates(self, **kwargs):
'''This method is used to prepare :class:`UnistrokeTemplate` objects
within the gestures in self.db. This is useful if you want to minimize
punishment of lazy resampling by preparing all vectors in advance. If
you do this before a call to :meth:`Recognizer.export_gesture`, you
will have the vectors computed when you load the data later.
This method accepts optional :meth:`Recognizer.filter` arguments.
`force_numpoints`, if specified, will prepare all templates to the
given number of points (instead of each template's preferred n; ie
:data:`UnistrokeTemplate.numpoints`). You normally don't want to
do this.'''
for gesture in self.filter(**kwargs):
for tpl in gesture:
n = kwargs.get('force_numpoints', tpl.numpoints)
tpl.prepare(n)
def recognize(self, strokes, goodscore=None, timeout=0, delay=0, **kwargs):
'''Search for gestures matching `strokes`. Returns a
:class:`ProgressTracker` instance.
This method accepts optional :meth:`Recognizer.filter` arguments.
:Arguments:
`strokes`
A list of stroke paths (list of lists of
:class:`~kivy.vector.Vector` objects) that will be matched
against gestures in the database. Can also be a
:class:`Candidate` instance.
.. Warning ::
If you manually supply a :class:`Candidate` that has a
skip-flag, make sure that the correct filter arguments
are set. Otherwise the system will attempt to load vectors
that have not been computed. For example, if you set
`skip_bounded` and do not set `orientation_sensitive` to
False, it will raise an exception if an
orientation_sensitive :class:`UnistrokeTemplate`
is encountered.
`goodscore`
If this is set (between 0.0 - 1.0) and a gesture score is
equal to or higher than the specified value, the search is
immediately halted and the on_search_complete event is
fired (+ the on_complete event of the associated
:class:`ProgressTracker` instance). Default is None (disabled).
`timeout`
Specifies a timeout (in seconds) for when the search is
aborted and the results returned. This option applies only
when `max_gpf` is not 0. Default value is 0, meaning all
gestures in the database will be tested, no matter how long
it takes.
`max_gpf`
Specifies the maximum number of :class:`MultistrokeGesture`
objects that can be processed per frame. When exceeded, will
cause the search to halt and resume work in the next frame.
Setting to 0 will complete the search immediately (and block
the UI).
.. Warning ::
This does not limit the number of
:class:`UnistrokeTemplate` objects matched! If a single
gesture has a million templates, they will all be
processed in a single frame with max_gpf=1!
`delay`
Sets an optional delay between each run of the recognizer
loop. Normally, a run is scheduled for the next frame until
the tasklist is exhausted. If you set this, there will be an
additional delay between each run (specified in seconds).
Default is 0, resume in the next frame.
`force_numpoints`
forces all templates (and candidate) to be prepared to a
certain number of points. This can be useful for example if
you are evaluating templates for optimal n (do not use this
unless you understand what it does).
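For illustration only (the values are arbitrary), a search on a
Recognizer `gdb` that stops early on a near-perfect match or after
roughly five seconds ::
progress = gdb.recognize(strokes, goodscore=0.95, timeout=5)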
'''
GPF = kwargs.get('max_gpf', DEFAULT_GPF)
# Obtain a list of MultistrokeGesture objects matching filter arguments
tasklist = self.filter(**kwargs)
# Initialize the candidate and result objects
cand = self._candidate(strokes)
result = ProgressTracker(cand, len(tasklist))
# This is done to inform the caller if they bind to on_complete and there
# is nothing to do; perhaps should just return None?
if not tasklist:
result.status = 'complete'
self.dispatch('on_search_complete', result)
def result_hack(dt):
result.dispatch('on_complete')
Clock.schedule_once(result_hack)
return result
# This callback is scheduled once per frame until completed
def _recognize_tick(dt):
start_gc = result._completed
stop_now = False
while not stop_now and (tasklist and not result._break_flag) and \
(not GPF or (result._completed - start_gc < GPF)):
if (timeout and
Clock.get_time() - result._start_time >= timeout):
result.status = 'timeout'
stop_now = True
break
# Get the best distance and number of matching operations done
gesture = tasklist.popleft()
tpl, d, res, mos = gesture.match_candidate(
cand, **kwargs)
if tpl is not None:
score = result._add_result(gesture, d, tpl, res)
if goodscore is not None and score >= goodscore:
result.status = 'goodscore'
stop_now = True
result._match_ops += mos
result._completed += 1
result.dispatch('on_progress')
# The loop has ended. Prepare to dispatch 'complete'
def _dispatch():
result.dispatch('on_complete')
self.dispatch('on_search_complete', result)
return False
# Dispatch or reschedule another run
if not tasklist:
result.status = 'complete'
return _dispatch()
elif result._break_flag:
result.status = 'stop'
return _dispatch()
elif stop_now:
return _dispatch()
else:
Clock.schedule_once(_recognize_tick, delay)
return True
# End _recognize_tick()
self.dispatch('on_search_start', result)
if not GPF:
_recognize_tick(0)
else:
Clock.schedule_once(_recognize_tick, 0)
return result
def _candidate(self, strokes, **kwargs):
# recognize() helper function, do not use directly. Set up a
# Candidate object from arguments. Either use a specified object
# or make a new one from strokes and apply safe skip_* settings to
# use less resources.
if isinstance(strokes, Candidate):
return strokes
if (not isinstance(strokes, list) or not len(strokes) or not
isinstance(strokes[0], list)):
raise MultistrokeError('recognize() needs strokes= '
'list or Candidate')
cand = Candidate(strokes)
o_filter = kwargs.get('orientation_sensitive', None)
if o_filter is False:
cand.skip_bounded = True
elif o_filter is True:
cand.skip_invariant = True
return cand
# Default event handlers
def on_search_start(self, result):
pass
def on_search_complete(self, result):
pass
# -----------------------------------------------------------------------------
# ProgressTracker
# -----------------------------------------------------------------------------
class ProgressTracker(EventDispatcher):
'''Represents an ongoing (or completed) search operation. Instantiated and
returned by the :meth:`Recognizer.recognize` method when it is called. The
`results` attribute is a dictionary that is updated as the recognition
operation progresses.
.. Note ::
You do not need to instantiate this class.
:Arguments:
`candidate`
:class:`Candidate` object to be evaluated
`tasks`
Total number of gestures in tasklist (to test against)
:Events:
`on_progress`
Fired for every gesture that is processed
`on_result`
Fired when a new result is added, and it is the first match
for the `name` so far, or a consecutive match with better score.
`on_complete`
Fired when the search is completed, for whatever reason.
(use `ProgressTracker.status` to find out)
:Attributes:
`results`
A dictionary of all results (so far). The key is the name of the
gesture (ie :attr:`UnistrokeTemplate.name` usually inherited from
:class:`MultistrokeGesture`). Each item in the dictionary is a
dict with the following entries:
`name`
Name of the matched template (redundant)
`score`
Computed score from 1.0 (perfect match) to 0.0
`dist`
Cosine distance from candidate to template (low=closer)
`gesture`
The :class:`MultistrokeGesture` object that was matched
`best_template`
Index of the best matching template (in
:attr:`MultistrokeGesture.templates`)
`template_results`
List of distances for all templates. The list index
corresponds to a :class:`UnistrokeTemplate` index in
gesture.templates.
`status`
`search`
Currently working
`stop`
Was stopped by the user (:meth:`stop` called)
`timeout`
A timeout occurred (specified as `timeout=` to recognize())
`goodscore`
The search was stopped early because a gesture with a high
enough score was found (specified as `goodscore=` to
recognize())
`complete`
The search is complete (all gestures matching filters were
tested)
'''
def __init__(self, candidate, tasks, **kwargs):
self.status = 'search'
self.candidate = candidate
self.results = {}
self.tasks = tasks
self._start_time = Clock.get_time()
self._match_ops = 0
self._completed = 0
self._break_flag = False
# fired by recognize()
self.register_event_type('on_complete')
self.register_event_type('on_progress')
# fired locally
self.register_event_type('on_result')
super(ProgressTracker, self).__init__(**kwargs)
@property
def progress(self):
'''Returns the progress as a float, 0 is 0% done, 1 is 100%. This
is a Python property.'''
if not self.tasks:
return 1
return self._completed / float(self.tasks)
@property
def best(self):
'''Return the best match found by recognize() so far. It returns a
dictionary with three keys, 'name', 'dist' and 'score' representing
the template's name, distance (from candidate path) and the
computed score value. This is a Python property.'''
results = self.results # to avoid too many self. lookups
if not results:
return {'name': None, 'dist': None, 'score': 0}
b = max(results, key=lambda r: results[r]['score'])
return {
'name': results[b]['name'],
'dist': results[b]['dist'],
'score': results[b]['score']
}
def stop(self):
'''Raises a stop flag that is checked by the search process. It will
be stopped on the next clock tick (if it is still running).'''
self._break_flag = True
def _add_result(self, gesture, dist, tpl, res):
# Add a result; used internally by the recognize() function
if tpl <= len(res):
n = gesture.templates[tpl].name
else:
return 0.
if n not in self.results or dist < self.results[n]['dist']:
self.results[n] = {
'name': n,
'dist': dist,
'gesture': gesture,
'best_template': tpl,
'template_results': res
}
if not dist:
self.results[n]['score'] = 1.0
else:
self.results[n]['score'] = 1.0 - (dist / pi)
self.dispatch('on_result', self.results[n])
return self.results[n]['score']
else:
return 0.
def on_complete(self):
pass
def on_progress(self):
pass
def on_result(self, result):
pass
# -----------------------------------------------------------------------------
# MultistrokeGesture
# -----------------------------------------------------------------------------
class MultistrokeGesture(object):
''':class:`MultistrokeGesture` represents a gesture. It maintains a set of
`strokes` and generates unistroke (ie :class:`UnistrokeTemplate`)
permutations that are used for evaluating candidates against this gesture
later.
:Arguments:
`name`
Identifies the name of the gesture - it is returned to you in the
results of a :meth:`Recognizer.recognize` search. You can have any
number of MultistrokeGesture objects with the same name; many
definitions of one gesture. The same name is given to all the
generated unistroke permutations. Required, no default.
`strokes`
A list of paths that represents the gesture. A path is a list of
Vector objects::
gesture = MultistrokeGesture('my_gesture', strokes=[
[Vector(x1, y1), Vector(x2, y2), ...... ], # stroke 1
[Vector(), Vector(), Vector(), Vector() ] # stroke 2
#, [stroke 3], [stroke 4], ...
])
For template matching purposes, all the strokes are combined to a
single list (unistroke). You should still specify the strokes
individually, and set `stroke_sensitive` True (whenever possible).
Once you do this, unistroke permutations are immediately generated
and stored in `self.templates` for later, unless you set the
`permute` flag to False.
`priority`
Determines when :func:`Recognizer.recognize` will attempt to match
this template, lower priorities are evaluated first (only if
a priority `filter` is used). You should use lower priority on
gestures that are more likely to match. For example, set user
templates at lower number than generic templates. Default is 100.
`numpoints`
Determines the number of points this gesture should be resampled to
(for matching purposes). The default is 16.
`stroke_sensitive`
Determines if the number of strokes (paths) in this gesture is
required to be the same in the candidate (user input) gesture
during matching. If this is False, candidates will always be
evaluated, disregarding the number of strokes. Default is True.
`orientation_sensitive`
Determines if this gesture is orientation sensitive. If True,
aligns the indicative orientation with the one of eight base
orientations that requires the least rotation. Default is True.
`angle_similarity`
This is used by the :func:`Recognizer.recognize` function when a
candidate is evaluated against this gesture. If the angles between
them are too far off, the template is considered a non-match.
Default is 30.0 (degrees)
`permute`
If False, do not use Heap Permute algorithm to generate different
stroke orders when instantiated. If you set this to False, a
single UnistrokeTemplate built from `strokes` is used.
'''
def __init__(self, name, strokes=None, **kwargs):
self.name = name
self.priority = kwargs.get('priority', 100)
self.numpoints = kwargs.get('numpoints', 16)
self.stroke_sens = kwargs.get('stroke_sensitive', True)
self.orientation_sens = kwargs.get('orientation_sensitive', True)
self.angle_similarity = kwargs.get('angle_similarity', 30.0)
self.strokes = []
if strokes is not None:
self.strokes = strokes
if kwargs.get('permute', True):
self.permute()
else:
self.templates = [UnistrokeTemplate(name,
points=[i for sub in strokes for i in sub],
numpoints=self.numpoints,
orientation_sensitive=self.orientation_sens)]
def angle_similarity_threshold(self):
return radians(self.angle_similarity)
def add_stroke(self, stroke, permute=False):
'''Add a stroke to the self.strokes list. If `permute` is True, the
:meth:`permute` method is called to generate new unistroke templates'''
self.strokes.append(stroke)
if permute:
self.permute()
def get_distance(self, cand, tpl, numpoints=None):
'''Compute the distance from a Candidate to a UnistrokeTemplate.
Returns the Cosine distance between the stroke paths.
`numpoints` will prepare both the UnistrokeTemplate and Candidate path
to n points (when necessary); you probably don't want to do this.
'''
n = numpoints
if n is None or n < 2:
n = self.numpoints
# optimal_cosine_distance() inlined here for performance
v1 = tpl.get_vector(n)
v2 = cand.get_protractor_vector(n, tpl.orientation_sens)
a = 0.0
b = 0.0
for i in xrange(0, len(v1), 2):
a += (v1[i] * v2[i]) + (v1[i + 1] * v2[i + 1])
b += (v1[i] * v2[i + 1]) - (v1[i + 1] * v2[i])
angle = atan(b / a)
# If you put the below directly into math.acos(), you will get a domain
# error when a=1.0 and angle=0.0 (ie math_cos(angle)=1.0). It seems to
# be because float representation of 1.0*1.0 is >1.0 (ie 1.00000...001)
# and this is problematic for math.acos(). If you try math.acos(1.0*1.0)
# in interpreter it does not happen, only with exact match at runtime
result = a * math_cos(angle) + b * math_sin(angle)
# FIXME: I'm sure there is a better way to do it but..
if result >= 1:
result = 1
elif result <= -1: # has not happened to me, but I leave it here.
result = -1
return acos(result)
def match_candidate(self, cand, **kwargs):
'''Match a given candidate against this MultistrokeGesture object. Will
test against all templates and report results as a list of four
items:
`index 0`
Best matching template's index (in self.templates)
`index 1`
Computed distance from the template to the candidate path
`index 2`
List of distances for all templates. The list index
corresponds to a :class:`UnistrokeTemplate` index in
self.templates.
`index 3`
Counter for the number of performed matching operations, ie
templates matched against the candidate
'''
best_d = float('infinity')
best_tpl = None
mos = 0
out = []
if (self.stroke_sens and len(self.strokes) != len(cand.strokes)):
return (best_tpl, best_d, out, mos)
skip_bounded = cand.skip_bounded
skip_invariant = cand.skip_invariant
get_distance = self.get_distance
ang_sim_threshold = self.angle_similarity_threshold()
for idx, tpl in enumerate(self.templates):
# Handle a theoretical case where a MultistrokeGesture is composed
# manually and the orientation_sensitive flag is True, and contains
# a UnistrokeTemplate that has orientation_sensitive=False (or vice
# versa). This would cause a KeyError - requesting a nonexistent vector.
if tpl.orientation_sens:
if skip_bounded:
continue
elif skip_invariant:
continue
# Count as a match operation now, since the call to get_
# angle_similarity below will force vector calculation,
# even if it doesn't make it to get_distance
mos += 1
# Note: With this implementation, we always resample the candidate
# to *any* encountered UnistrokeTemplate numpoints here, the filter
# is only applied to MultistrokeGesture. See theoretical case
# above; should not matter normally.
n = kwargs.get('force_numpoints', tpl.numpoints)
# Skip if candidate/gesture angles are too far off
ang_sim = cand.get_angle_similarity(tpl, numpoints=n)
if ang_sim > ang_sim_threshold:
continue
# Get the distance between cand/tpl paths
d = get_distance(cand, tpl, numpoints=n)
out.append(d)
if d < best_d:
best_d = d
best_tpl = idx
return (best_tpl, best_d, out, mos)
def permute(self):
'''Generate all possible unistroke permutations from self.strokes and
save the resulting list of UnistrokeTemplate objects in self.templates.
Quote from http://faculty.washington.edu/wobbrock/pubs/gi-10.2.pdf ::
We use Heap Permute [16] (p. 179) to generate all stroke orders
in a multistroke gesture. Then, to generate stroke directions for
each order, we treat each component stroke as a dichotomous
[0,1] variable. There are 2^N combinations for N strokes, so we
convert the decimal values 0 to 2^N-1, inclusive, to binary
representations and regard each bit as indicating forward (0) or
reverse (1). This algorithm is often used to generate truth tables
in propositional logic.
See section 4.1: "$N Algorithm" of the linked paper for details.
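As a concrete count, a 3-stroke gesture yields 3! = 6 stroke orders,
each with 2^3 = 8 direction combinations, ie 48 unistroke templates.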
.. Warning ::
Using heap permute for gestures with more than 3 strokes
can result in a very large number of templates (a 9-stroke
gesture = 38 million templates). If you are dealing with
these types of gestures, you should manually compose
all the desired stroke orders.
'''
# Seed with index of each stroke
self._order = [i for i in xrange(0, len(self.strokes))]
# Prepare ._orders
self._orders = []
self._heap_permute(len(self.strokes))
del self._order
# Generate unistroke permutations
self.templates = [UnistrokeTemplate(
self.name,
points=permutation,
numpoints=self.numpoints,
orientation_sensitive=self.orientation_sens
) for permutation in self._make_unistrokes()]
del self._orders
def _heap_permute(self, n):
# Heap Permute algorithm
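# Classic Heap's algorithm: recursively permute the first n-1 elements of
# self._order in place, then swap the last element with the first (odd n)
# or with the i-th element (even n) to produce every stroke ordering.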
self_order = self._order
if n == 1:
self._orders.append(self_order[:])
else:
i = 0
for i in xrange(0, n):
self._heap_permute(n - 1)
if n % 2 == 1:
tmp = self_order[0]
self_order[0] = self_order[n - 1]
self_order[n - 1] = tmp
else:
tmp = self_order[i]
self_order[i] = self_order[n - 1]
self_order[n - 1] = tmp
def _make_unistrokes(self):
# Create unistroke permutations from self.strokes
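# For each stroke order produced by _heap_permute, iterate b over
# 0..2^N-1; bit i of b decides whether stroke r[i] is traversed forward
# (0) or reversed (1), matching the $N paper's direction enumeration.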
unistrokes = []
unistrokes_append = unistrokes.append
self_strokes = self.strokes
for r in self._orders:
b = 0
while b < pow(2, len(r)): # use b's bits for directions
unistroke = []
unistroke_append = unistroke.append
for i in xrange(0, len(r)):
pts = self_strokes[r[i]][:]
if (b >> i) & 1 == 1: # is b's bit at index i 1?
pts.reverse()
unistroke_append(None)
unistroke[-1:] = pts
unistrokes_append(unistroke)
b += 1
return unistrokes
# -----------------------------------------------------------------------------
# UnistrokeTemplate
# -----------------------------------------------------------------------------
class UnistrokeTemplate(object):
'''Represents a (uni)stroke path as a list of Vectors. Normally, this class
is instantiated by MultistrokeGesture and not by the programmer directly.
However, it is possible to manually compose UnistrokeTemplate objects.
:Arguments:
`name`
Identifies the name of the gesture. This is normally inherited from
the parent MultistrokeGesture object when a template is generated.
`points`
A list of points that represents a unistroke path. This is normally
one of the possible stroke order permutations from a
MultistrokeGesture.
`numpoints`
The number of points this template should (ideally) be resampled to
before the matching process. The default is 16, but you can use a
template-specific setting if that improves results.
`orientation_sensitive`
Determines if this template is orientation sensitive (True) or
fully rotation invariant (False). The default is True.
.. Note::
You will get an exception if you set a skip-flag and then attempt to
retrieve those vectors.
'''
def __init__(self, name, points=None, **kwargs):
self.name = name
self.numpoints = kwargs.get('numpoints', 16)
self.orientation_sens = kwargs.get('orientation_sensitive', True)
self.db = {}
self.points = []
if points is not None:
self.points = points
def add_point(self, p):
'''Add a point to the unistroke/path. This invalidates all previously
computed vectors.'''
self.points.append(p)
# All previously computed data is now void.
self.db = {}
# Used to lazily prepare the template
def _get_db_key(self, key, numpoints=None):
n = numpoints and numpoints or self.numpoints
if n not in self.db:
self.prepare(n)
return self.db[n][key]
def get_start_unit_vector(self, numpoints=None):
return self._get_db_key('startvector', numpoints)
def get_vector(self, numpoints=None):
return self._get_db_key('vector', numpoints)
def get_points(self, numpoints=None):
return self._get_db_key('points', numpoints)
def prepare(self, numpoints=None):
'''This function prepares the UnistrokeTemplate for matching given a
target number of points (for resample). 16 is optimal.'''
if not self.points:
raise MultistrokeError('prepare() called without self.points')
# How many points are we resampling to?
n = numpoints or self.numpoints
if not n or n < 2:
raise MultistrokeError('prepare() called with invalid numpoints')
p = resample(self.points, n)
radians = indicative_angle(p)
p = rotate_by(p, -radians)
p = scale_dim(p, SQUARESIZE, ONEDTHRESHOLD)
if self.orientation_sens:
p = rotate_by(p, +radians) # restore
p = translate_to(p, ORIGIN)
# Now store it using the number of points in the resampled path as the
# dict key. On the next call to get_*, it will be returned instead of
# recomputed. Implicitly, you must reset self.db or call prepare() for
# all the keys once you manipulate self.points.
self.db[n] = {
# Compute STARTANGLEINDEX as n/8:
'startvector': start_unit_vector(p, (n / 8)),
'vector': vectorize(p, self.orientation_sens)
}
# -----------------------------------------------------------------------------
# Candidate
# -----------------------------------------------------------------------------
class Candidate(object):
'''Represents a set of unistroke paths of user input, ie data to be matched
against a :class:`UnistrokeTemplate` object using the Protractor algorithm.
By default, data is precomputed to match both rotation bounded and fully
invariant :class:`UnistrokeTemplate` objects.
:Arguments:
`strokes`
See :data:`MultistrokeGesture.strokes` for format example. The
Candidate strokes are simply combined to a unistroke in the order
given. The idea is that this will match one of the unistroke
permutations in `MultistrokeGesture.templates`.
`numpoints`
The Candidate's default N; this is only a fallback and is not
normally used since n is driven by the UnistrokeTemplate we are
being compared to.
`skip_bounded`
If True, do not generate/store rotation bounded vectors
`skip_invariant`
If True, do not generate/store rotation invariant vectors
Note that you WILL get errors if you set a skip-flag and then attempt to
retrieve the data.'''
def __init__(self, strokes=None, numpoints=16, **kwargs):
self.skip_invariant = kwargs.get('skip_invariant', False)
self.skip_bounded = kwargs.get('skip_bounded', False)
self.numpoints = numpoints
self.db = {}
self.strokes = []
if strokes is not None:
self.strokes = strokes
def add_stroke(self, stroke):
'''Add a stroke to the candidate; this will invalidate all
previously computed vectors'''
self.strokes.append(stroke)
self.db = {}
# Used to lazily prepare the candidate
def _get_db_key(self, key, numpoints, orientation_sens):
n = numpoints and numpoints or self.numpoints
if n not in self.db:
self.prepare(n)
prefix = orientation_sens and 'bound_' or 'inv_'
return self.db[n][prefix + key]
def get_start_unit_vector(self, numpoints, orientation_sens):
'''(Internal use only) Get the start vector for this Candidate,
with the path resampled to `numpoints` points. This is the first
step in the matching process. It is compared to a
UnistrokeTemplate object's start vector to determine angle
similarity.'''
return self._get_db_key('startvector', numpoints, orientation_sens)
def get_protractor_vector(self, numpoints, orientation_sens):
'''(Internal use only) Return vector for comparing to a
UnistrokeTemplate with Protractor'''
return self._get_db_key('vector', numpoints, orientation_sens)
def get_angle_similarity(self, tpl, **kwargs):
'''(Internal use only) Compute the angle similarity between this
Candidate and a UnistrokeTemplate object. Returns a number that
represents the angle similarity (lower is more similar).'''
n = kwargs.get('numpoints', self.numpoints)
# angle_between_unit_vectors() inlined here for performance
v1x, v1y = self.get_start_unit_vector(n, tpl.orientation_sens)
v2x, v2y = tpl.get_start_unit_vector(n)
n = (v1x * v2x + v1y * v2y)
# FIXME: Domain error on float representation of 1.0 (exact match)
# (see comments in MultistrokeGesture.get_distance())
if n >= 1:
return 0.0
if n <= -1:
return pi
return acos(n)
def prepare(self, numpoints=None):
'''Prepare the Candidate vectors. self.strokes is combined to a single
unistroke (connected end-to-end), resampled to :attr:`numpoints` points,
and then the vectors are calculated and stored in self.db (for use by
`get_distance` and `get_angle_similarity`)'''
n = numpoints and numpoints or self.numpoints
# Inlined combine_strokes() for performance
points = [i for sub in self.strokes for i in sub]
points = resample(points, n)
radians = indicative_angle(points)
points = rotate_by(points, -radians)
points = scale_dim(points, SQUARESIZE, ONEDTHRESHOLD)
# Compute STARTANGLEINDEX as n / 8
angidx = n / 8
cand = {}
# full rotation invariance
if not self.skip_invariant:
inv_points = translate_to(points, ORIGIN)
cand['inv_startvector'] = start_unit_vector(inv_points, angidx)
cand['inv_vector'] = vectorize(inv_points, False)
# rotation bounded invariance
if not self.skip_bounded:
bound_points = rotate_by(points, +radians) # restore
bound_points = translate_to(bound_points, ORIGIN)
cand['bound_startvector'] = start_unit_vector(bound_points, angidx)
cand['bound_vector'] = vectorize(bound_points, True)
self.db[n] = cand
# -----------------------------------------------------------------------------
# Helper functions from this point on. This is all directly related to the
# recognition algorithm, and is almost 100% transcription from the JavaScript
# -----------------------------------------------------------------------------
def resample(points, n):
# Resample a path to `n` points
if not len(points) or not n or n < 2:
raise MultistrokeError('resample() called with invalid arguments')
interval = path_length(points) / (n - 1)
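# Walk the original path and emit a new point every time `interval` units
# of arc length have been accumulated, splitting segments where needed so
# the resampled points end up evenly spaced.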
D = 0.0
i = 1
newpoints = [points[0]]
workpoints = points[:]
newpoints_len = 1
workpoints_len = len(points)
new_append = newpoints.append
work_insert = workpoints.insert
while i < len(workpoints):
p1 = workpoints[i - 1]
p2 = workpoints[i]
d = distance(p1, p2)
if D + d >= interval:
qx = p1[0] + ((interval - D) / d) * (p2[0] - p1[0])
qy = p1[1] + ((interval - D) / d) * (p2[1] - p1[1])
q = Vector(qx, qy)
new_append(q)
work_insert(i, q) # q is the next i
newpoints_len += 1
workpoints_len += 1
D = 0.0
else:
D += d
i += 1
# rounding error; insert the last point
if newpoints_len < n:
new_append(points[-1])
return newpoints
def indicative_angle(points):
cx, cy = centroid(points)
return atan2(cy - points[0][1], cx - points[0][0])
def rotate_by(points, radians):
# Rotate points around centroid
cx, cy = centroid(points)
cos = math_cos(radians)
sin = math_sin(radians)
newpoints = []
newpoints_append = newpoints.append
for i in xrange(0, len(points)):
qx = (points[i][0] - cx) * cos - (points[i][1] - cy) * sin + cx
qy = (points[i][0] - cx) * sin + (points[i][1] - cy) * cos + cy
newpoints_append(Vector(qx, qy))
return newpoints
def scale_dim(points, size, oneDratio):
bbox_x, bbox_y, bbox_w, bbox_h = bounding_box(points)
if bbox_h == 0 or bbox_w == 0:
raise MultistrokeError('scale_dim() called with invalid points')
# 1D or 2D gesture test
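# A very thin bounding box (short/long side ratio <= oneDratio) is treated
# as a 1D gesture and scaled uniformly to avoid distorting it; otherwise
# both axes are stretched independently to fill the size x size square.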
uniformly = min(bbox_w / bbox_h, bbox_h / bbox_w) <= oneDratio
if uniformly:
qx_size = size / max(bbox_w, bbox_h)
qy_size = size / max(bbox_w, bbox_h)
else:
qx_size = size / bbox_w
qy_size = size / bbox_h
newpoints = []
newpoints_append = newpoints.append
for p in points:
qx = p[0] * qx_size
qy = p[1] * qy_size
newpoints_append(Vector(qx, qy))
return newpoints
def translate_to(points, pt):
# Translate points around centroid
cx, cy = centroid(points)
ptx, pty = pt
newpoints = []
for p in points:
qx = p[0] + ptx - cx
qy = p[1] + pty - cy
newpoints.append(Vector(qx, qy))
return newpoints
def vectorize(points, use_bounded_rotation_invariance):
# Helper function for the Protractor algorithm
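# The points are flattened into an interleaved [x0, y0, x1, y1, ...] list
# and normalized to unit length; with bounded rotation invariance the path
# is first rotated so its start angle snaps to the nearest of eight base
# orientations (multiples of pi/4).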
cos = 1.0
sin = 0.0
if use_bounded_rotation_invariance:
ang = atan2(points[0][1], points[0][0])
bo = (pi / 4.) * floor((ang + pi / 8.) / (pi / 4.))
cos = math_cos(bo - ang)
sin = math_sin(bo - ang)
sum = 0.0
vector = []
vector_len = 0
vector_append = vector.append
for px, py in points:
newx = px * cos - py * sin
newy = py * cos + px * sin
vector_append(newx)
vector_append(newy)
vector_len += 2
sum += newx ** 2 + newy ** 2
magnitude = sqrt(sum)
for i in xrange(0, vector_len):
vector[i] /= magnitude
return vector
def centroid(points):
x = 0.0
y = 0.0
points_len = len(points)
for i in xrange(0, points_len):
x += points[i][0]
y += points[i][1]
x /= points_len
y /= points_len
return Vector(x, y)
def bounding_box(points):
minx = float('infinity')
miny = float('infinity')
maxx = float('-infinity')
maxy = float('-infinity')
for px, py in points:
if px < minx:
minx = px
if px > maxx:
maxx = px
if py < miny:
miny = py
if py > maxy:
maxy = py
return (minx, miny, maxx - minx, maxy - miny)
def path_length(points):
d = 0.0
for i in xrange(1, len(points)):
d += distance(points[i - 1], points[i])
return d
def distance(p1, p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return sqrt(dx ** 2 + dy ** 2)
def start_unit_vector(points, index):
i = int(index)
vx, vy = points[i][0] - points[0][0], points[i][1] - points[0][1]
length = sqrt(vx ** 2 + vy ** 2)
return Vector(vx / length, vy / length)
|
jswope00/GAI
|
refs/heads/master
|
lms/djangoapps/instructor/management/commands/dump_grades.py
|
18
|
#!/usr/bin/python
#
# django management command: dump grades to csv files
# for use by batch processes
import csv
from instructor.views.legacy import get_student_grade_summary_data
from courseware.courses import get_course_by_id
from xmodule.modulestore.django import modulestore
from django.core.management.base import BaseCommand
from instructor.utils import DummyRequest
class Command(BaseCommand):
help = "dump grades to CSV file. Usage: dump_grades course_id_or_dir filename dump_type\n"
help += " course_id_or_dir: either course_id or course_dir\n"
help += " filename: where the output CSV is to be stored\n"
# help += " start_date: end date as M/D/Y H:M (defaults to end of available data)"
help += " dump_type: 'all' or 'raw' (see instructor dashboard)"
def handle(self, *args, **options):
# current grading logic and data schema doesn't handle dates
# datetime.strptime("21/11/06 16:30", "%m/%d/%y %H:%M")
print "args = ", args
course_id = 'MITx/8.01rq_MW/Classical_Mechanics_Reading_Questions_Fall_2012_MW_Section'
fn = "grades.csv"
get_raw_scores = False
if len(args) > 0:
course_id = args[0]
if len(args) > 1:
fn = args[1]
if len(args) > 2:
get_raw_scores = args[2].lower() == 'raw'
request = DummyRequest()
try:
course = get_course_by_id(course_id)
except Exception:
if course_id in modulestore().courses:
course = modulestore().courses[course_id]
else:
print "-----------------------------------------------------------------------------"
print "Sorry, cannot find course %s" % course_id
print "Please provide a course ID or course data directory name, eg content-mit-801rq"
return
print "-----------------------------------------------------------------------------"
print "Dumping grades from %s to file %s (get_raw_scores=%s)" % (course.id, fn, get_raw_scores)
datatable = get_student_grade_summary_data(request, course, course.id, get_raw_scores=get_raw_scores)
fp = open(fn, 'w')
writer = csv.writer(fp, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(datatable['header'])
for datarow in datatable['data']:
encoded_row = [unicode(s).encode('utf-8') for s in datarow]
writer.writerow(encoded_row)
fp.close()
print "Done: %d records dumped" % len(datatable['data'])
|
vjmac15/Lyilis
|
refs/heads/master
|
lib/youtube_dl/extractor/pornhd (VJ Washington's conflicted copy 2017-08-29).py
|
2
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
)
class PornHdIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
_TESTS = [{
'url': 'http://www.pornhd.com/videos/9864/selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
'md5': 'c8b964b1f0a4b5f7f28ae3a5c9f86ad5',
'info_dict': {
'id': '9864',
'display_id': 'selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
'ext': 'mp4',
'title': 'Restroom selfie masturbation',
'description': 'md5:3748420395e03e31ac96857a8f125b2b',
'thumbnail': r're:^https?://.*\.jpg',
'view_count': int,
'age_limit': 18,
}
}, {
# removed video
'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'md5': '956b8ca569f7f4d8ec563e2c41598441',
'info_dict': {
'id': '1962',
'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'ext': 'mp4',
'title': 'Sierra loves doing laundry',
'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
'thumbnail': r're:^https?://.*\.jpg',
'view_count': int,
'age_limit': 18,
},
'skip': 'Not available anymore',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id or video_id)
title = self._html_search_regex(
[r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)',
r'<title>(.+?) - .*?[Pp]ornHD.*?</title>'], webpage, 'title')
sources = self._parse_json(js_to_json(self._search_regex(
r"(?s)sources'?\s*:\s*(\{.+?\})\s*\}[;,)]",
webpage, 'sources', default='{}')), video_id)
if not sources:
message = self._html_search_regex(
r'(?s)<(div|p)[^>]+class="no-video"[^>]*>(?P<value>.+?)</\1',
webpage, 'error message', group='value')
raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
formats = []
for format_id, video_url in sources.items():
if not video_url:
continue
height = int_or_none(self._search_regex(
r'^(\d+)[pP]', format_id, 'height', default=None))
formats.append({
'url': video_url,
'format_id': format_id,
'height': height,
})
self._sort_formats(formats)
description = self._html_search_regex(
r'<(div|p)[^>]+class="description"[^>]*>(?P<value>[^<]+)</\1',
webpage, 'description', fatal=False, group='value')
view_count = int_or_none(self._html_search_regex(
r'(\d+) views\s*<', webpage, 'view count', fatal=False))
thumbnail = self._search_regex(
r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'view_count': view_count,
'formats': formats,
'age_limit': 18,
}
|
Metaswitch/pjsip-upstream
|
refs/heads/master
|
tests/pjsua/scripts-recvfrom/235_reg_good_tel_uri_enocredential.py
|
57
|
# $Id: 235_reg_good_tel_uri_enocredential.py 3323 2010-09-28 07:43:18Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
pjsua = "--null-audio --id=tel:+12345 --registrar sip:127.0.0.1:$PORT"
req1 = sip.RecvfromTransaction("", 401,
include=["REGISTER sip"],
exclude=["Authorization"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1234\""],
expect="PJSIP_ENOCREDENTIAL"
)
recvfrom_cfg = sip.RecvfromCfg("Failed registration with tel: URI test",
pjsua, [req1])
|
webOS-ports/qtwebkit
|
refs/heads/webOS-ports/master
|
Tools/Scripts/webkitpy/common/message_pool.py
|
129
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for handling messages and concurrency for run-webkit-tests
and test-webkitpy. This module follows the design for multiprocessing.Pool
and concurrent.futures.ProcessPoolExecutor, with the following differences:
* Tasks are executed in stateful subprocesses via objects that implement the
Worker interface - this allows the workers to share state across tasks.
* The pool provides an asynchronous event-handling interface so the caller
may receive events as tasks are processed.
If you don't need these features, use multiprocessing.Pool or concurrent.futures
instead.
"""
import cPickle
import logging
import multiprocessing
import Queue
import sys
import time
import traceback
from webkitpy.common.host import Host
from webkitpy.common.system import stack_utils
_log = logging.getLogger(__name__)
def get(caller, worker_factory, num_workers, worker_startup_delay_secs=0.0, host=None):
"""Returns an object that exposes a run() method that takes a list of test shards and runs them in parallel."""
return _MessagePool(caller, worker_factory, num_workers, worker_startup_delay_secs, host)
class _MessagePool(object):
def __init__(self, caller, worker_factory, num_workers, worker_startup_delay_secs=0.0, host=None):
self._caller = caller
self._worker_factory = worker_factory
self._num_workers = num_workers
self._worker_startup_delay_secs = worker_startup_delay_secs
self._workers = []
self._workers_stopped = set()
self._host = host
self._name = 'manager'
self._running_inline = (self._num_workers == 1)
if self._running_inline:
self._messages_to_worker = Queue.Queue()
self._messages_to_manager = Queue.Queue()
else:
self._messages_to_worker = multiprocessing.Queue()
self._messages_to_manager = multiprocessing.Queue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self._close()
return False
def run(self, shards):
"""Posts a list of messages to the pool and waits for them to complete."""
for message in shards:
self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
for _ in xrange(self._num_workers):
self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
self.wait()
def _start_workers(self):
assert not self._workers
self._workers_stopped = set()
host = None
if self._running_inline or self._can_pickle(self._host):
host = self._host
for worker_number in xrange(self._num_workers):
worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
self._workers.append(worker)
worker.start()
if self._worker_startup_delay_secs:
time.sleep(self._worker_startup_delay_secs)
def _worker_log_level(self):
log_level = logging.NOTSET
for handler in logging.root.handlers:
if handler.level != logging.NOTSET:
if log_level == logging.NOTSET:
log_level = handler.level
else:
log_level = min(log_level, handler.level)
return log_level
def wait(self):
try:
self._start_workers()
if self._running_inline:
self._workers[0].run()
self._loop(block=False)
else:
self._loop(block=True)
finally:
self._close()
def _close(self):
for worker in self._workers:
if worker.is_alive():
worker.terminate()
worker.join()
self._workers = []
if not self._running_inline:
# FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
multiprocessing.util._exiting = True
if self._messages_to_worker:
self._messages_to_worker.close()
self._messages_to_worker = None
if self._messages_to_manager:
self._messages_to_manager.close()
self._messages_to_manager = None
def _log_messages(self, messages):
for message in messages:
logging.root.handle(message)
def _handle_done(self, source):
self._workers_stopped.add(source)
@staticmethod
def _handle_worker_exception(source, exception_type, exception_value, _):
if exception_type == KeyboardInterrupt:
raise exception_type(exception_value)
raise WorkerException(str(exception_value))
def _can_pickle(self, host):
try:
cPickle.dumps(host)
return True
except TypeError:
return False
def _loop(self, block):
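# Drain messages coming back from the workers: user messages are forwarded
# to the caller's handle(), while control messages ('done',
# 'worker_exception') are routed to the matching _handle_* method. The
# loop ends when the queue raises Queue.Empty.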
try:
while True:
if len(self._workers_stopped) == len(self._workers):
block = False
message = self._messages_to_manager.get(block)
self._log_messages(message.logs)
if message.from_user:
self._caller.handle(message.name, message.src, *message.args)
continue
method = getattr(self, '_handle_' + message.name)
assert method, 'bad message %s' % repr(message)
method(message.src, *message.args)
except Queue.Empty:
pass
class WorkerException(BaseException):
"""Raised when we receive an unexpected/unknown exception from a worker."""
pass
class _Message(object):
def __init__(self, src, message_name, message_args, from_user, logs):
self.src = src
self.name = message_name
self.args = message_args
self.from_user = from_user
self.logs = logs
def __repr__(self):
return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (self.src, self.name, self.args, self.from_user, self.logs)


class _Worker(multiprocessing.Process):
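    """Runs one worker, either inline in the manager's process (when there is a
    single worker) or as a separate multiprocessing.Process."""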
def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
super(_Worker, self).__init__()
self.host = host
self.worker_number = worker_number
self.name = 'worker/%d' % worker_number
self.log_messages = []
self.log_level = log_level
self._running_inline = running_inline
self._manager = manager
self._messages_to_manager = messages_to_manager
self._messages_to_worker = messages_to_worker
self._worker = worker_factory(self)
self._logger = None
self._log_handler = None
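    # Give the wrapped worker a chance to clean up via stop() before killing
    # the process, and only call the base terminate() if it is still alive.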
def terminate(self):
if self._worker:
if hasattr(self._worker, 'stop'):
self._worker.stop()
self._worker = None
if self.is_alive():
super(_Worker, self).terminate()
def _close(self):
if self._log_handler and self._logger:
self._logger.removeHandler(self._log_handler)
self._log_handler = None
self._logger = None
def start(self):
if not self._running_inline:
super(_Worker, self).start()
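    # Worker main loop: build a Host if none was passed in, set up logging when
    # running in a child process, then handle messages until 'stop' arrives.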
def run(self):
if not self.host:
self.host = Host()
if not self._running_inline:
self._set_up_logging()
worker = self._worker
exception_msg = ""
_log.debug("%s starting" % self.name)
try:
if hasattr(worker, 'start'):
worker.start()
while True:
message = self._messages_to_worker.get()
if message.from_user:
worker.handle(message.name, message.src, *message.args)
self._yield_to_manager()
else:
assert message.name == 'stop', 'bad message %s' % repr(message)
break
_log.debug("%s exiting" % self.name)
except Queue.Empty:
assert False, '%s: ran out of messages in worker queue.' % self.name
except KeyboardInterrupt, e:
self._raise(sys.exc_info())
except Exception, e:
self._raise(sys.exc_info())
finally:
try:
if hasattr(worker, 'stop'):
worker.stop()
finally:
self._post(name='done', args=(), from_user=False)
self._close()
def post(self, name, *args):
self._post(name, args, from_user=True)
self._yield_to_manager()
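    # When running inline there is no separate process servicing the manager's
    # queue, so give the manager a chance to drain it after each posted message.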
def _yield_to_manager(self):
if self._running_inline:
self._manager._loop(block=False)
def _post(self, name, args, from_user):
log_messages = self.log_messages
self.log_messages = []
self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))
def _raise(self, exc_info):
exception_type, exception_value, exception_traceback = exc_info
if self._running_inline:
raise exception_type, exception_value, exception_traceback
if exception_type == KeyboardInterrupt:
_log.debug("%s: interrupted, exiting" % self.name)
stack_utils.log_traceback(_log.debug, exception_traceback)
else:
_log.error("%s: %s('%s') raised:" % (self.name, exception_value.__class__.__name__, str(exception_value)))
stack_utils.log_traceback(_log.error, exception_traceback)
# Since tracebacks aren't picklable, send the extracted stack instead.
stack = traceback.extract_tb(exception_traceback)
self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)
def _set_up_logging(self):
self._logger = logging.getLogger()
# The unix multiprocessing implementation clones any log handlers into the child process,
# so we remove them to avoid duplicate logging.
for h in self._logger.handlers:
self._logger.removeHandler(h)
self._log_handler = _WorkerLogHandler(self)
self._logger.addHandler(self._log_handler)
self._logger.setLevel(self.log_level)


class _WorkerLogHandler(logging.Handler):
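    """Buffers log records emitted in the worker; they are attached to the next
    message posted to the manager and replayed by the manager's root logger."""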
def __init__(self, worker):
logging.Handler.__init__(self)
self._worker = worker
self.setLevel(worker.log_level)
def emit(self, record):
self._worker.log_messages.append(record)
|
pablomuri/Engine
|
refs/heads/python-core2
|
libraries/netip/python/__init__.py
|
12133432
|