| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null ⌀) |
|---|---|---|---|---|
mavit/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/na_elementsw_volume_clone.py
|
9
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
"""Element Software volume clone"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_elementsw_volume_clone
short_description: NetApp Element Software Create Volume Clone
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Create volume clones on Element OS
options:
name:
description:
- The name of the clone.
required: true
src_volume_id:
description:
- The ID of the source volume to clone. The ID may be a numeric identifier or a volume name.
required: true
src_snapshot_id:
description:
- The ID of the snapshot to clone. The ID may be a numeric identifier or a snapshot name.
account_id:
description:
- Account ID for the owner of this cloned volume. The ID may be a numeric identifier or an account name.
required: true
attributes:
description: A YAML dictionary of attributes that you would like to apply to this cloned volume.
size:
description:
- The size of the cloned volume in (size_unit).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
access:
choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
description:
- Access allowed for the volume.
- If unspecified, the access settings of the clone will be the same as the source.
- readOnly - Only read operations are allowed.
- readWrite - Reads and writes are allowed.
- locked - No reads or writes are allowed.
- replicationTarget - Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked.
'''
EXAMPLES = """
- name: Clone Volume
na_elementsw_volume_clone:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
name: CloneAnsibleVol
src_volume_id: 123
src_snapshot_id: 41
account_id: 3
size: 1
size_unit: gb
access: readWrite
attributes: {"virtual_network_id": 12345}
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_elementsw_module import NaElementSWModule
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class ElementOSVolumeClone(object):
"""
Contains methods to parse arguments,
derive details of Element Software objects
and send requests to Element OS via
the Solidfire SDK
"""
def __init__(self):
"""
Parse arguments, setup state variables,
check parameters and ensure the SDK is installed
"""
self._size_unit_map = netapp_utils.SF_BYTE_MAP
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=True),
src_volume_id=dict(required=True),
src_snapshot_id=dict(),
account_id=dict(required=True),
attributes=dict(type='dict', default=None),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
access=dict(type='str',
default=None, choices=['readOnly', 'readWrite',
'locked', 'replicationTarget']),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
# set up state variables
self.name = parameters['name']
self.src_volume_id = parameters['src_volume_id']
self.src_snapshot_id = parameters['src_snapshot_id']
self.account_id = parameters['account_id']
self.attributes = parameters['attributes']
self.size_unit = parameters['size_unit']
if parameters['size'] is not None:
self.size = parameters['size'] * \
self._size_unit_map[self.size_unit]
else:
self.size = None
self.access = parameters['access']
if HAS_SF_SDK is False:
self.module.fail_json(
msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
self.elementsw_helper = NaElementSWModule(self.sfe)
# add telemetry attributes
if self.attributes is not None:
self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone'))
else:
self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone')
def get_account_id(self):
"""
Return account id if found
"""
try:
# Update and return self.account_id
self.account_id = self.elementsw_helper.account_exists(self.account_id)
return self.account_id
except Exception as err:
self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
def get_snapshot_id(self):
"""
Return snapshot details if found
"""
src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
# Update and return self.src_snapshot_id
if src_snapshot is not None:
self.src_snapshot_id = src_snapshot.snapshot_id
# Return src_snapshot
return self.src_snapshot_id
return None
def get_src_volume_id(self):
"""
Return volume id if found
"""
src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
if src_vol_id is not None:
# Update and return self.volume_id
self.src_volume_id = src_vol_id
# Return src_volume_id
return self.src_volume_id
return None
def clone_volume(self):
"""Clone Volume from source"""
try:
self.sfe.clone_volume(volume_id=self.src_volume_id,
name=self.name,
new_account_id=self.account_id,
new_size=self.size,
access=self.access,
snapshot_id=self.src_snapshot_id,
attributes=self.attributes)
except Exception as err:
self.module.fail_json(msg="Error creating clone %s of size %s" % (self.name, self.size), exception=to_native(err))
def apply(self):
"""Perform pre-checks, call functions and exit"""
changed = False
result_message = ""
if self.get_account_id() is None:
self.module.fail_json(msg="Account id not found: %s" % (self.account_id))
# there is only one state. other operations
# are part of the volume module
# ensure that a volume with the clone name
# isn't already present
if self.elementsw_helper.volume_exists(self.name, self.account_id) is None:
# check for the source volume
if self.get_src_volume_id() is not None:
# check for a valid snapshot
if self.src_snapshot_id and not self.get_snapshot_id():
self.module.fail_json(msg="Snapshot id not found: %s" % (self.src_snapshot_id))
# change required
changed = True
else:
self.module.fail_json(msg="Volume id not found %s" % (self.src_volume_id))
if changed:
if self.module.check_mode:
result_message = "Check mode, skipping changes"
else:
self.clone_volume()
result_message = "Volume cloned"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""Create object and call apply"""
volume_clone = ElementOSVolumeClone()
volume_clone.apply()
if __name__ == '__main__':
main()
|
zozo123/buildbot
|
refs/heads/master
|
master/buildbot/test/unit/test_contrib_buildbot_cvs_mail.py
|
3
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import re
import sys
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import utils
from twisted.python import log
from twisted.trial import unittest
test = '''
Update of /cvsroot/test
In directory example:/tmp/cvs-serv21085
Modified Files:
README hello.c
Log Message:
two files checkin
'''
golden_1_11_regex = [
'^From:',
'^To: buildbot@example.com$',
'^Reply-To: noreply@example.com$',
'^Subject: cvs update for project test$',
'^Date:',
'^X-Mailer: Python buildbot-cvs-mail',
'^$',
'^Cvsmode: 1.11$',
'^Category: None',
'^CVSROOT: \"ext:example:/cvsroot\"',
'^Files: test README 1.1,1.2 hello.c 2.2,2.3$',
'^Project: test$',
'^$',
'^Update of /cvsroot/test$',
'^In directory example:/tmp/cvs-serv21085$',
'^$',
'^Modified Files:$',
'README hello.c$',
'Log Message:$',
'^two files checkin',
'^$',
'^$']
golden_1_12_regex = [
'^From: ',
'^To: buildbot@example.com$',
'^Reply-To: noreply@example.com$',
'^Subject: cvs update for project test$',
'^Date: ',
'^X-Mailer: Python buildbot-cvs-mail',
'^$',
'^Cvsmode: 1.12$',
'^Category: None$',
'^CVSROOT: \"ext:example.com:/cvsroot\"$',
'^Files: README 1.1 1.2 hello.c 2.2 2.3$',
'^Path: test$',
'^Project: test$',
'^$',
'^Update of /cvsroot/test$',
'^In directory example:/tmp/cvs-serv21085$',
'^$',
'^Modified Files:$',
'README hello.c$',
'^Log Message:$',
'two files checkin',
'^$',
'^$']
class _SubprocessProtocol(protocol.ProcessProtocol):
def __init__(self, input, deferred):
self.input = input
self.deferred = deferred
self.output = ''
def outReceived(self, s):
self.output += s
errReceived = outReceived
def connectionMade(self):
# push the input and send EOF
self.transport.write(self.input)
self.transport.closeStdin()
def processEnded(self, reason):
self.deferred.callback((self.output, reason.value.exitCode))
def getProcessOutputAndValueWithInput(executable, args, input):
"similar to getProcessOutputAndValue, but also allows injection of input on stdin"
d = defer.Deferred()
p = _SubprocessProtocol(input, d)
reactor.spawnProcess(p, executable, (executable,) + tuple(args))
return d
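# Illustrative usage sketch (not part of the original test; the '-c' program is an
# assumed example): the helper behaves like twisted's getProcessOutputAndValue but
# also writes 'input' to the child's stdin before closing it.
#   d = getProcessOutputAndValueWithInput(
#       sys.executable,
#       ['-c', 'import sys; sys.stdout.write(sys.stdin.read())'],
#       input='hello')
#   d.addCallback(lambda res: res)  # fires with (combined stdout+stderr, exit code), here ('hello', 0)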
class TestBuildbotCvsMail(unittest.TestCase):
buildbot_cvs_mail_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../contrib/buildbot_cvs_mail.py'))
if not os.path.exists(buildbot_cvs_mail_path):
skip = ("'%s' does not exist (normal unless run from git)"
% buildbot_cvs_mail_path)
def assertOutputOk(self, xxx_todo_changeme4, regexList):
"assert that the output from getProcessOutputAndValueWithInput matches expectations"
(output, code) = xxx_todo_changeme4
try:
self.failUnlessEqual(code, 0, "subprocess exited uncleanly")
lines = output.splitlines()
self.failUnlessEqual(len(lines), len(regexList),
"got wrong number of lines of output")
misses = []
for line, regex in zip(lines, regexList):
m = re.search(regex, line)
if not m:
misses.append((regex, line))
self.assertEqual(misses, [], "got non-matching lines")
except:
log.msg("got output:\n" + output)
raise
def test_buildbot_cvs_mail_from_cvs1_11(self):
# Simulate CVS 1.11
d = getProcessOutputAndValueWithInput(sys.executable,
[self.buildbot_cvs_mail_path, '--cvsroot=\"ext:example:/cvsroot\"',
'--email=buildbot@example.com', '-P', 'test', '-R', 'noreply@example.com', '-t',
'test', 'README', '1.1,1.2', 'hello.c', '2.2,2.3'],
input=test)
d.addCallback(self.assertOutputOk, golden_1_11_regex)
return d
def test_buildbot_cvs_mail_from_cvs1_12(self):
# Simulate CVS 1.12, with --path option
d = getProcessOutputAndValueWithInput(sys.executable,
[self.buildbot_cvs_mail_path, '--cvsroot=\"ext:example.com:/cvsroot\"',
'--email=buildbot@example.com', '-P', 'test', '--path', 'test',
'-R', 'noreply@example.com', '-t',
'README', '1.1', '1.2', 'hello.c', '2.2', '2.3'],
input=test)
d.addCallback(self.assertOutputOk, golden_1_12_regex)
return d
def test_buildbot_cvs_mail_no_args_exits_with_error(self):
d = utils.getProcessOutputAndValue(sys.executable, [self.buildbot_cvs_mail_path])
def check(xxx_todo_changeme):
(stdout, stderr, code) = xxx_todo_changeme
self.assertEqual(code, 2)
d.addCallback(check)
return d
def test_buildbot_cvs_mail_without_email_opt_exits_with_error(self):
d = utils.getProcessOutputAndValue(sys.executable, [self.buildbot_cvs_mail_path,
'--cvsroot=\"ext:example.com:/cvsroot\"',
'-P', 'test', '--path', 'test',
'-R', 'noreply@example.com', '-t',
'README', '1.1', '1.2', 'hello.c', '2.2', '2.3'])
def check(xxx_todo_changeme1):
(stdout, stderr, code) = xxx_todo_changeme1
self.assertEqual(code, 2)
d.addCallback(check)
return d
def test_buildbot_cvs_mail_without_cvsroot_opt_exits_with_error(self):
d = utils.getProcessOutputAndValue(sys.executable, [self.buildbot_cvs_mail_path,
'--complete-garbage-opt=gomi',
'--cvsroot=\"ext:example.com:/cvsroot\"',
'--email=buildbot@example.com', '-P', 'test', '--path',
'test', '-R', 'noreply@example.com', '-t',
'README', '1.1', '1.2', 'hello.c', '2.2', '2.3'])
def check(xxx_todo_changeme2):
(stdout, stderr, code) = xxx_todo_changeme2
self.assertEqual(code, 2)
d.addCallback(check)
return d
def test_buildbot_cvs_mail_with_unknown_opt_exits_with_error(self):
d = utils.getProcessOutputAndValue(sys.executable, [self.buildbot_cvs_mail_path,
'--email=buildbot@example.com', '-P', 'test', '--path',
'test', '-R', 'noreply@example.com', '-t',
'README', '1.1', '1.2', 'hello.c', '2.2', '2.3'])
def check(xxx_todo_changeme3):
(stdout, stderr, code) = xxx_todo_changeme3
self.assertEqual(code, 2)
d.addCallback(check)
return d
|
aioue/ansible
|
refs/heads/devel
|
lib/ansible/parsing/splitter.py
|
59
|
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import re
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR * 8, _HEXCHAR * 4, _HEXCHAR * 2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
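# Illustrative sketch (examples assumed, not from the original source):
# _decode_escapes replaces recognised backslash escapes with their literal
# characters and leaves everything else untouched, e.g.
#   _decode_escapes(u'a\\tb')       ->  u'a\tb'   (escaped tab becomes a real tab)
#   _decode_escapes(u'caf\\u00e9')  ->  u'café'   (4-digit unicode escape decoded)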
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
args = to_text(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleParserError("error parsing argument string, try quoting the entire line.", orig_exc=ve)
else:
raise
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
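# Hedged usage sketch (illustrative values, not from the original source):
#   parse_kv(u'src=/tmp/a dest=/tmp/b')
#       -> {u'src': u'/tmp/a', u'dest': u'/tmp/b'}
#   parse_kv(u'echo hello creates=/tmp/done', check_raw=True)
#       -> {u'creates': u'/tmp/done', u'_raw_params': u'echo hello'}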
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx - 1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
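# Illustrative sketch (assumed examples): _get_quote_state reports whether a token
# leaves the parser inside an open quote.
#   _get_quote_state('c="foo', None)  ->  '"'   (a double quote was opened and not closed)
#   _get_quote_state('bar"', '"')     ->  None  (the previously open quote was closed)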
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation of shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for (itemidx, item) in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for (idx, token) in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes and not(print_depth or block_depth or comment_depth):
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
params[-1] = "%s\n%s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise AnsibleParserError("failed at splitting arguments, either an unbalanced jinja2 block or quotes: {}".format(args))
return params
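# Hedged behaviour sketch (examples assumed from the docstring above): whitespace
# inside quotes or inside a jinja2 {{ }}, {% %} or {# #} block does not start a
# new token.
#   split_args('a=b c="foo bar"')         ->  ['a=b', 'c="foo bar"']
#   split_args('msg={{ item | upper }}')  ->  ['msg={{ item | upper }}']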
|
chongtianfeiyu/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/xml/etree/cElementTree.py
|
876
|
# Deprecated alias for xml.etree.ElementTree
from xml.etree.ElementTree import *
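# Illustrative note (assumption, not part of the stdlib file): the old cElementTree
# import path now simply re-exports the ElementTree API, e.g.
#   from xml.etree.cElementTree import Element, tostring
#   tostring(Element('root'))  # -> b'<root />'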
|
klaus385/openpilot
|
refs/heads/devel
|
selfdrive/controls/lib/alertmanager.py
|
1
|
from cereal import car, log
from selfdrive.swaglog import cloudlog
from common.realtime import sec_since_boot
import copy
# Priority
class Priority:
HIGH = 3
MID = 2
LOW = 1
LOWEST = 0
AlertSize = log.Live100Data.AlertSize
AlertStatus = log.Live100Data.AlertStatus
class Alert(object):
def __init__(self,
alert_text_1,
alert_text_2,
alert_status,
alert_size,
alert_priority,
visual_alert,
audible_alert,
duration_sound,
duration_hud_alert,
duration_text):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.alert_priority = alert_priority
self.visual_alert = visual_alert if visual_alert is not None else "none"
self.audible_alert = audible_alert if audible_alert is not None else "none"
self.duration_sound = duration_sound
self.duration_hud_alert = duration_hud_alert
self.duration_text = duration_text
self.start_time = 0.
# typecheck that enums are valid on startup
tst = car.CarControl.new_message()
tst.hudControl.visualAlert = self.visual_alert
tst.hudControl.audibleAlert = self.audible_alert
def __str__(self):
return self.alert_text_1 + "/" + self.alert_text_2 + " " + str(self.alert_priority) + " " + str(
self.visual_alert) + " " + str(self.audible_alert)
def __gt__(self, alert2):
return self.alert_priority > alert2.alert_priority
class AlertManager(object):
alerts = {
# Miscellaneous alerts
"enable": Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, None, "beepSingle", .2, 0., 0.),
"disable": Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, None, "beepSingle", .2, 0., 0.),
"fcw": Alert(
"Brake!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "fcw", "chimeRepeated", 1., 2., 2.),
"steerSaturated": Alert(
"TAKE CONTROL",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, "steerRequired", "chimeSingle", 1., 2., 3.),
"steerTempUnavailable": Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, "steerRequired", "chimeDouble", .4, 2., 3.),
"preDriverDistracted": Alert(
"TAKE CONTROL",
"User Appears Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, "steerRequired", None, 0., .1, .1),
"driverDistracted": Alert(
"TAKE CONTROL TO REGAIN SPEED",
"User Appears Distracted",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", .1, .1, .1),
"startup": Alert(
"Be ready to take over at any time",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, None, None, 0., 0., 15.),
"ethicalDilemma": Alert(
"TAKE CONTROL IMMEDIATELY",
"Ethical Dilemma Detected",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 3.),
"steerTempUnavailableNoEntry": Alert(
"openpilot Unavailable",
"Steering Temporarily Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 0., 3.),
"manualRestart": Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, None, None, 0., 0., .2),
# Non-entry only alerts
"wrongCarModeNoEntry": Alert(
"openpilot Unavailable",
"Main Switch Off",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 0., 3.),
"dataNeededNoEntry": Alert(
"openpilot Unavailable",
"Data Needed for Calibration. Upload Drive, Try Again",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 0., 3.),
"outOfSpaceNoEntry": Alert(
"openpilot Unavailable",
"Out of Storage Space",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 0., 3.),
"pedalPressedNoEntry": Alert(
"openpilot Unavailable",
"Pedal Pressed During Attempt",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, "brakePressed", "chimeDouble", .4, 2., 3.),
"speedTooLowNoEntry": Alert(
"openpilot Unavailable",
"Speed Too Low",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"brakeHoldNoEntry": Alert(
"openpilot Unavailable",
"Brake Hold Active",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"parkBrakeNoEntry": Alert(
"openpilot Unavailable",
"Park Brake Engaged",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"lowSpeedLockoutNoEntry": Alert(
"openpilot Unavailable",
"Cruise Fault: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
# Cancellation alerts causing soft disabling
"overheat": Alert(
"TAKE CONTROL IMMEDIATELY",
"System Overheated",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
"wrongGear": Alert(
"TAKE CONTROL IMMEDIATELY",
"Gear not D",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
"calibrationInvalid": Alert(
"TAKE CONTROL IMMEDIATELY",
"Calibration Invalid: Reposition EON and Recalibrate",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
"calibrationInProgress": Alert(
"TAKE CONTROL IMMEDIATELY",
"Calibration in Progress",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
"doorOpen": Alert(
"TAKE CONTROL IMMEDIATELY",
"Door Open",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
"seatbeltNotLatched": Alert(
"TAKE CONTROL IMMEDIATELY",
"Seatbelt Unlatched",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
"espDisabled": Alert(
"TAKE CONTROL IMMEDIATELY",
"ESP Off",
AlertStatus.critical, AlertSize.full,
Priority.MID, "steerRequired", "chimeRepeated", 1., 3., 3.),
# Cancellation alerts causing immediate disabling
"radarCommIssue": Alert(
"TAKE CONTROL IMMEDIATELY",
"Radar Error: Restart the Car",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"radarFault": Alert(
"TAKE CONTROL IMMEDIATELY",
"Radar Error: Restart the Car",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"modelCommIssue": Alert(
"TAKE CONTROL IMMEDIATELY",
"Model Error: Restart the Car",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"controlsFailed": Alert(
"TAKE CONTROL IMMEDIATELY",
"Controls Failed",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"controlsMismatch": Alert(
"TAKE CONTROL IMMEDIATELY",
"Controls Mismatch",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"commIssue": Alert(
"TAKE CONTROL IMMEDIATELY",
"CAN Error: Check Connections",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"steerUnavailable": Alert(
"TAKE CONTROL IMMEDIATELY",
"Steer Fault: Restart the Car",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"brakeUnavailable": Alert(
"TAKE CONTROL IMMEDIATELY",
"Brake Fault: Restart the Car",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"gasUnavailable": Alert(
"TAKE CONTROL IMMEDIATELY",
"Gas Fault: Restart the Car",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"reverseGear": Alert(
"TAKE CONTROL IMMEDIATELY",
"Reverse Gear",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"cruiseDisabled": Alert(
"TAKE CONTROL IMMEDIATELY",
"Cruise Is Off",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
"plannerError": Alert(
"TAKE CONTROL IMMEDIATELY",
"Planner Solution Error",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, "steerRequired", "chimeRepeated", 1., 3., 4.),
# not loud cancellations (user is in control)
"noTarget": Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, None, "chimeDouble", .4, 2., 3.),
"speedTooLow": Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, None, "chimeDouble", .4, 2., 3.),
# Cancellation alerts causing non-entry
"overheatNoEntry": Alert(
"openpilot Unavailable",
"System overheated",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"wrongGearNoEntry": Alert(
"openpilot Unavailable",
"Gear not D",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"calibrationInvalidNoEntry": Alert(
"openpilot Unavailable",
"Calibration Invalid: Reposition EON and Recalibrate",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"calibrationInProgressNoEntry": Alert(
"openpilot Unavailable",
"Calibration in Progress",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"doorOpenNoEntry": Alert(
"openpilot Unavailable",
"Door open",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"seatbeltNotLatchedNoEntry": Alert(
"openpilot Unavailable",
"Seatbelt unlatched",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"espDisabledNoEntry": Alert(
"openpilot Unavailable",
"ESP Off",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"radarCommIssueNoEntry": Alert(
"openpilot Unavailable",
"Radar Error: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"radarFaultNoEntry": Alert(
"openpilot Unavailable",
"Radar Error: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"modelCommIssueNoEntry": Alert(
"openpilot Unavailable",
"Model Error: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"controlsFailedNoEntry": Alert(
"openpilot Unavailable",
"Controls Failed",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"commIssueNoEntry": Alert(
"openpilot Unavailable",
"CAN Error: Check Connections",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"steerUnavailableNoEntry": Alert(
"openpilot Unavailable",
"Steer Fault: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"brakeUnavailableNoEntry": Alert(
"openpilot Unavailable",
"Brake Fault: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"gasUnavailableNoEntry": Alert(
"openpilot Unavailable",
"Gas Error: Restart the Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"reverseGearNoEntry": Alert(
"openpilot Unavailable",
"Reverse Gear",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"cruiseDisabledNoEntry": Alert(
"openpilot Unavailable",
"Cruise is Off",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"noTargetNoEntry": Alert(
"openpilot Unavailable",
"No Close Lead Car",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
"plannerErrorNoEntry": Alert(
"openpilot Unavailable",
"Planner Solution Error",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, None, "chimeDouble", .4, 2., 3.),
# permanent alerts to display on small UI upper box
"steerUnavailablePermanent": Alert(
"STEER FAULT: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, None, None, 0., 0., .2),
"brakeUnavailablePermanent": Alert(
"BRAKE FAULT: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, None, None, 0., 0., .2),
"lowSpeedLockoutPermanent": Alert(
"CRUISE FAULT: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, None, None, 0., 0., .2),
}
def __init__(self):
self.activealerts = []
def alertPresent(self):
return len(self.activealerts) > 0
def add(self, alert_type, enabled=True, extra_text=''):
alert_type = str(alert_type)
added_alert = copy.copy(self.alerts[alert_type])
added_alert.alert_text_2 += extra_text
added_alert.start_time = sec_since_boot()
# if new alert is higher priority, log it
if not self.alertPresent() or \
added_alert.alert_priority > self.activealerts[0].alert_priority:
cloudlog.event('alert_add',
alert_type=alert_type,
enabled=enabled)
self.activealerts.append(added_alert)
# sort by priority first and then by start_time
self.activealerts.sort(key=lambda k: (k.alert_priority, k.start_time), reverse=True)
# TODO: cycle through alerts?
def process_alerts(self, cur_time):
# first get rid of all the expired alerts
self.activealerts = [a for a in self.activealerts if a.start_time +
max(a.duration_sound, a.duration_hud_alert, a.duration_text) > cur_time]
ca = self.activealerts[0] if self.alertPresent() else None
# start with assuming no alerts
self.alert_text_1 = ""
self.alert_text_2 = ""
self.alert_status = AlertStatus.normal
self.alert_size = AlertSize.none
self.visual_alert = "none"
self.audible_alert = "none"
if ca:
if ca.start_time + ca.duration_sound > cur_time:
self.audible_alert = ca.audible_alert
if ca.start_time + ca.duration_hud_alert > cur_time:
self.visual_alert = ca.visual_alert
if ca.start_time + ca.duration_text > cur_time:
self.alert_text_1 = ca.alert_text_1
self.alert_text_2 = ca.alert_text_2
self.alert_status = ca.alert_status
self.alert_size = ca.alert_size
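# Hedged usage sketch (the call sequence below is an assumption about how a
# controls loop would drive AlertManager; it is not part of this file):
#   am = AlertManager()
#   am.add("steerTempUnavailable", enabled=True)
#   am.process_alerts(sec_since_boot())
#   # am.alert_text_1, am.alert_text_2, am.alert_status, am.alert_size,
#   # am.visual_alert and am.audible_alert now describe the highest-priority,
#   # non-expired alert for this frame (or the "no alert" defaults).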
|
ladybug-analysis-tools/honeybeex
|
refs/heads/master
|
plugin/src/occschduleweek_node.py
|
3
|
# assign inputs
_occHours_, _offHours_, _weekend_, _defValue_ = IN
schedule = values = None
try:
from honeybee.schedule import Schedule
except ImportError as e:
raise ImportError('\nFailed to import honeybee:\n\t{}'.format(e))
schedule = Schedule.from_workday_hours(
_occHours_, _offHours_, _weekend_, _defValue_)
values = schedule.values
# assign outputs to OUT
OUT = schedule, values
|
tinkerinestudio/Tinkerine-Suite
|
refs/heads/master
|
TinkerineSuite/python/Lib/numpy/distutils/mingw32ccompiler.py
|
76
|
"""
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
import os
import subprocess
import sys
import re
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
if sys.version_info[0] < 3:
import log
else:
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
""" A modified MingW32 compiler compatible with an MSVC built Python.
"""
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
distutils.cygwinccompiler.CygwinCCompiler.__init__ (self,
verbose,dry_run, force)
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
import re
p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
stdout=subprocess.PIPE)
out_string = p.stdout.read()
p.stdout.close()
result = re.search('(\d+\.\d+)',out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if self.linker_dll == 'dllwrap':
# Commented out '--driver-name g++' part that fixes weird
# g++.exe: g++: No such file or directory
# error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
# If the --driver-name part is required for some environment
# then make the inclusion of this part specific to that environment.
self.linker = 'dllwrap' # --driver-name g++'
elif self.linker_dll == 'gcc':
self.linker = 'g++'
# **changes: eric jones 4/11/01
# 1. Check for import library on Windows. Build if it doesn't exist.
build_import_library()
# **changes: eric jones 4/11/01
# 2. increased optimization and turned off all warnings
# 3. also added --driver-name g++
#self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
# compiler_so='gcc -mno-cygwin -mdll -O2 -w',
# linker_exe='gcc -mno-cygwin',
# linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s'
# % (self.linker, entry_point))
# MS_WIN64 should be defined when building for amd64 on windows, but
# python headers define it only for MS compilers, which has all kind of
# bad consequences, like using Py_ModuleInit4 instead of
# Py_ModuleInit4_64, etc... So we add it here
if get_build_architecture() == 'AMD64':
if self.gcc_version < "4.":
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes',
linker_exe='gcc -g -mno-cygwin',
linker_so='gcc -g -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
linker_exe='gcc -g',
linker_so='gcc -g -shared')
else:
if self.gcc_version <= "3.0.0":
self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='%s -mno-cygwin -mdll -static %s'
% (self.linker, entry_point))
elif self.gcc_version < "4.":
self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='g++ -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(compiler='gcc -O2 -Wall',
compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ ',
linker_so='g++ -shared')
# added for python2.3 support
# we can't pass it through set_executables because pre 2.2 would fail
self.compiler_cxx = ['g++']
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
return
# __init__ ()
def link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols = None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# Include the appropriate MSVC runtime library if Python was built
# with MSVC >= 7.0 (MinGW standard is msvcrt)
runtime_library = msvc_runtime_library()
if runtime_library:
if not libraries:
libraries = []
libraries.append(runtime_library)
args = (self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, #export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
if self.gcc_version < "3.0.0":
func = distutils.cygwinccompiler.CygwinCCompiler.link
else:
func = UnixCCompiler.link
if sys.version_info[0] >= 3:
func(*args[:func.__code__.co_argcount])
else:
func(*args[:func.im_func.func_code.co_argcount])
return
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
# added these lines to strip off windows drive letters
# without it, .o files are placed next to .c files
# instead of the build directory
drv,base = os.path.splitdrive(base)
if drv:
base = base[1:]
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def find_python_dll():
maj, min, micro = [int(i) for i in sys.version_info[:3]]
dllname = 'python%d%d.dll' % (maj, min)
print ("Looking for %s" % dllname)
# We can't do much here:
# - find it in python main dir
# - in system32,
# - otherwise (SxS), I don't know how to get it.
lib_dirs = []
lib_dirs.append(os.path.join(sys.prefix, 'lib'))
try:
lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))
except KeyError:
pass
for d in lib_dirs:
dll = os.path.join(d, dllname)
if os.path.exists(dll):
return dll
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
return st.stdout.readlines()
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
into the given def file.
The .def file will be overwritten"""
dump = dump_table(dll)
for i in range(len(dump)):
if _START.match(dump[i]):
break
if i == len(dump):
raise ValueError("Symbol table not found")
syms = []
for j in range(i+1, len(dump)):
m = _TABLE.match(dump[j])
if m:
syms.append((int(m.group(1).strip()), m.group(2)))
else:
break
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
d = open(dfile, 'w')
d.write('LIBRARY %s\n' % os.path.basename(dll))
d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
d.write(';DATA PRELOAD SINGLE\n')
d.write('\nEXPORTS\n')
for s in syms:
#d.write('@%d %s\n' % (s[0], s[1]))
d.write('%s\n' % s[1])
d.close()
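# Illustrative sketch of the output (symbol names are assumed, not taken from a
# real dump): for python27.dll the generated .def file looks roughly like
#   LIBRARY python27.dll
#   ;CODE PRELOAD MOVEABLE DISCARDABLE
#   ;DATA PRELOAD SINGLE
#
#   EXPORTS
#   PyObject_GetAttr
#   PyImport_ImportModule
#   ...
# dlltool then turns this file into an import library (see build_import_library below).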
def build_import_library():
if os.name != 'nt':
return
arch = get_build_architecture()
if arch == 'AMD64':
return _build_import_library_amd64()
elif arch == 'Intel':
return _build_import_library_x86()
else:
raise ValueError("Unhandled arch %s" % arch)
def _build_import_library_amd64():
dll_file = find_python_dll()
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' % (out_file))
return
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix,'libs',def_name)
log.info('Building import library (arch=AMD64): "%s" (from %s)' \
% (out_file, dll_file))
generate_def(dll_file, def_file)
cmd = ['dlltool', '-d', def_file, '-l', out_file]
subprocess.Popen(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
"""
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
lib_file = os.path.join(sys.prefix,'libs',lib_name)
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix,'libs',out_name)
if not os.path.isfile(lib_file):
log.warn('Cannot build import library: "%s" not found' % (lib_file))
return
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' % (out_file))
return
log.info('Building import library (ARCH=x86): "%s"' % (out_file))
from numpy.distutils import lib2def
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix,'libs',def_name)
nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
nm_output = lib2def.getnm(nm_cmd)
dlist, flist = lib2def.parse_nm(nm_output)
lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
args = (dll_name,def_file,out_file)
cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
status = os.system(cmd)
# for now, fail silently
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
#if not success:
# msg = "Couldn't find import library, and failed to build it."
# raise DistutilsPlatformError, msg
return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with visual studio manifests. Manifest are a mechanism to
# enforce strong DLL versioning on windows, and has nothing to do with
# distutils MANIFEST. manifests are XML files with version info, and used by
# the OS loader; they are necessary when linking against a DLL not in the
# system path; in particular, official python 2.6 binary is built against the
# MS runtime 9 (the one from VS 2008), which is not available on most windows
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
# directory, but this requires the manifest for this to work. This is a big
# mess, thanks MS for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by python. I
# submitted a patch to get this version, but it was only included for python
# 2.6.1 and above. So for versions below, we use a "best guess".
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
try:
import msvcrt
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
_MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION
else:
_MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
# I took one version in my SxS directory: no idea if it is the good
# one, and we can't retrieve it from python
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
except ImportError:
# If we are here, means python was not built with MSVC. Not sure what to do
# in that case: manifest building will fail, but it should not be used in
# that case anyway
log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
"""Given a major and minor version of the MSVCR, returns the
corresponding XML file."""
try:
fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
except KeyError:
raise ValueError("Version %d,%d of MSVCRT not supported yet" \
% (maj, min))
# Don't be fooled, it looks like an XML, but it is not. In particular, it
# should not have any space before starting, and its size should be
# divisible by 4, most likely for alignment constraints when the xml is
# embedded in the binary...
# This template was copied directly from the python 2.6 binary (using
# strings.exe from mingw on python.exe).
template = """\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>"""
return template % {'fullver': fullver, 'maj': maj, 'min': min}
def manifest_rc(name, type='dll'):
"""Return the rc file used to generate the res file which will be embedded
as manifest for given manifest file name, of given type ('dll' or
'exe').
Parameters
----------
name: str
name of the manifest file to embed
type: str ('dll', 'exe')
type of the binary which will embed the manifest"""
if type == 'dll':
rctype = 2
elif type == 'exe':
rctype = 1
else:
raise ValueError("Type %s not supported" % type)
return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
def check_embedded_msvcr_match_linked(msver):
"""msver is the ms runtime version used for the MANIFEST."""
# check msvcr major version are the same for linking and
# embedding
msvcv = msvc_runtime_library()
if msvcv:
maj = int(msvcv[5:6])
if not maj == int(msver):
raise ValueError(
"Discrepancy between linked msvcr " \
"(%d) and the one about to be embedded " \
"(%d)" % (int(msver), maj))
def configtest_name(config):
base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
return os.path.splitext(base)[0]
def manifest_name(config):
# Get configtest name (including suffix)
root = configtest_name(config)
exext = config.compiler.exe_extension
return root + exext + ".manifest"
def rc_name(config):
# Get configtest name (including suffix)
root = configtest_name(config)
return root + ".rc"
def generate_manifest(config):
msver = get_build_msvc_version()
if msver is not None:
if msver >= 8:
check_embedded_msvcr_match_linked(msver)
ma = int(msver)
mi = int((msver - ma) * 10)
# Write the manifest file
manxml = msvc_manifest_xml(ma, mi)
man = open(manifest_name(config), "w")
config.temp_files.append(manifest_name(config))
man.write(manxml)
man.close()
# # Write the rc file
# manrc = manifest_rc(manifest_name(self), "exe")
# rc = open(rc_name(self), "w")
# self.temp_files.append(manrc)
# rc.write(manrc)
# rc.close()
|
jiezhu2007/scrapy
|
refs/heads/master
|
scrapy/spiders/feed.py
|
151
|
"""
This module implements the XMLFeedSpider which is the recommended spider to use
for scraping from an XML feed.
See documentation in docs/topics/spiders.rst
"""
from scrapy.spiders import Spider
from scrapy.utils.iterators import xmliter, csviter
from scrapy.utils.spider import iterate_spider_output
from scrapy.selector import Selector
from scrapy.exceptions import NotConfigured, NotSupported
class XMLFeedSpider(Spider):
"""
This class is intended to be the base class for spiders that scrape
from XML feeds.
You can choose whether to parse the file using the 'iternodes' iterator, an
'xml' selector, or an 'html' selector. In most cases, it's convenient to
use iternodes, since it's faster and cleaner.
"""
iterator = 'iternodes'
itertag = 'item'
namespaces = ()
def process_results(self, response, results):
"""This overridable method is called for each result (item or request)
returned by the spider, and it's intended to perform any last time
processing required before returning the results to the framework core,
for example setting the item GUIDs. It receives a list of results and
the response which originated that results. It must return a list of
results (Items or Requests).
"""
return results
def adapt_response(self, response):
"""You can override this function in order to make any changes you want
to into the feed before parsing it. This function must return a
response.
"""
return response
def parse_node(self, response, selector):
"""This method must be overriden with your custom spider functionality"""
if hasattr(self, 'parse_item'): # backward compatibility
return self.parse_item(response, selector)
raise NotImplementedError
def parse_nodes(self, response, nodes):
"""This method is called for the nodes matching the provided tag name
(itertag). Receives the response and a Selector for each node.
Overriding this method is mandatory. Otherwise, your spider won't work.
This method must return either a BaseItem, a Request, or a list
containing any of them.
"""
for selector in nodes:
ret = iterate_spider_output(self.parse_node(response, selector))
for result_item in self.process_results(response, ret):
yield result_item
def parse(self, response):
if not hasattr(self, 'parse_node'):
raise NotConfigured('You must define parse_node method in order to scrape this XML feed')
response = self.adapt_response(response)
if self.iterator == 'iternodes':
nodes = self._iternodes(response)
elif self.iterator == 'xml':
selector = Selector(response, type='xml')
self._register_namespaces(selector)
nodes = selector.xpath('//%s' % self.itertag)
elif self.iterator == 'html':
selector = Selector(response, type='html')
self._register_namespaces(selector)
nodes = selector.xpath('//%s' % self.itertag)
else:
raise NotSupported('Unsupported node iterator')
return self.parse_nodes(response, nodes)
def _iternodes(self, response):
for node in xmliter(response, self.itertag):
self._register_namespaces(node)
yield node
def _register_namespaces(self, selector):
for (prefix, uri) in self.namespaces:
selector.register_namespace(prefix, uri)
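# Minimal subclass sketch (illustrative only, not part of scrapy; the URL and
# field names are assumptions):
#
#   class MyXMLSpider(XMLFeedSpider):
#       name = 'example_xml'
#       start_urls = ['http://www.example.com/feed.xml']
#       iterator = 'iternodes'  # the default
#       itertag = 'item'
#
#       def parse_node(self, response, node):
#           # node is a Selector scoped to a single <item> element
#           return {'title': node.xpath('title/text()').extract()}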
class CSVFeedSpider(Spider):
"""Spider for parsing CSV feeds.
It receives a CSV file in a response; iterates through each of its rows,
and calls parse_row with a dict containing each field's data.
You can set some options regarding the CSV file, such as the delimiter, quotechar
and the file's headers.
"""
delimiter = None # When this is None, python's csv module's default delimiter is used
quotechar = None # When this is None, python's csv module's default quotechar is used
headers = None
def process_results(self, response, results):
"""This method has the same purpose as the one in XMLFeedSpider"""
return results
def adapt_response(self, response):
"""This method has the same purpose as the one in XMLFeedSpider"""
return response
def parse_row(self, response, row):
"""This method must be overriden with your custom spider functionality"""
raise NotImplementedError
def parse_rows(self, response):
"""Receives a response and a dict (representing each row) with a key for
each provided (or detected) header of the CSV file. This spider also
gives the opportunity to override adapt_response and
process_results methods for pre and post-processing purposes.
"""
for row in csviter(response, self.delimiter, self.headers, self.quotechar):
ret = iterate_spider_output(self.parse_row(response, row))
for result_item in self.process_results(response, ret):
yield result_item
def parse(self, response):
if not hasattr(self, 'parse_row'):
raise NotConfigured('You must define parse_row method in order to scrape this CSV feed')
response = self.adapt_response(response)
return self.parse_rows(response)
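# Corresponding CSVFeedSpider sketch (illustrative only, not part of scrapy; the
# URL, delimiter and headers are assumptions):
#
#   class MyCSVSpider(CSVFeedSpider):
#       name = 'example_csv'
#       start_urls = ['http://www.example.com/feed.csv']
#       delimiter = ';'
#       headers = ['id', 'name', 'description']
#
#       def parse_row(self, response, row):
#           # row is a dict keyed by the headers above
#           return {'id': row['id'], 'name': row['name']}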
|
infobloxopen/infoblox-netmri
|
refs/heads/master
|
infoblox_netmri/api/broker/v3_1_0/job_broker.py
|
10
|
from ..broker import Broker
class JobBroker(Broker):
controller = "jobs"
def index(self, **kwargs):
"""Lists the available jobs. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param JobID: The internal NetMRI identifier for this job.
:type JobID: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this job.
:type id: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: The name of this job, as entered when the job specification was defined or when the job was executed from a script.
:type name: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param started_at: The time this job started execution.
:type started_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of job methods. The listed methods will be called on each job returned and included in the output. Available methods are: meta.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: meta.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Job. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return jobs: An array of the Job objects that match the specified input criteria.
:rtype jobs: Array of Job
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified job.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this job.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of job methods. The listed methods will be called on each job returned and included in the output. Available methods are: meta.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: meta.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return job: The job identified by the specified id.
:rtype job: Job
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available jobs matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param approval_note: The approval note.
:type approval_note: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param approval_note: The approval note.
:type approval_note: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param approved_at: The time when the job was approved.
:type approved_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param approved_at: The time when the job was approved.
:type approved_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param approved_by: The user that approved the job.
:type approved_by: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param approved_by: The user that approved the job.
:type approved_by: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param category: The job category.
:type category: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param category: The job category.
:type category: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param completed_at: The time this job completed execution against all devices.
:type completed_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param completed_at: The time this job completed execution against all devices.
:type completed_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param config_template_id: The internal identifier for the config template.
:type config_template_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param config_template_id: The internal identifier for the config template.
:type config_template_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param config_template_text: The content of the config template.
:type config_template_text: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param config_template_text: The content of the config template.
:type config_template_text: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param created_at: The time when the job was created.
:type created_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The time when the job was created.
:type created_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param created_by: The user that created the job.
:type created_by: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_by: The user that created the job.
:type created_by: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param credential_source: The source of a device credential.
:type credential_source: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param credential_source: The source of a device credential.
:type credential_source: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param description: The description of the job.
:type description: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: The description of the job.
:type description: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this job.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this job.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param input_data: The input data for the job.
:type input_data: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param input_data: The input data for the job.
:type input_data: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param job_specification_id: The internal identifier for the associated Job Specification.
:type job_specification_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param job_specification_id: The internal identifier for the associated Job Specification.
:type job_specification_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param job_type: The type of job. This can be Scheduled, Ad Hoc, or Run Now.
:type job_type: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param job_type: The type of job. This can be Scheduled, Ad Hoc, or Run Now.
:type job_type: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param last_status_at: The time when the job status was updated.
:type last_status_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param last_status_at: The time when the job status was updated.
:type last_status_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name: The name of this job, as entered when the job specification was defined or when the job was executed from a script.
:type name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: The name of this job, as entered when the job specification was defined or when the job was executed from a script.
:type name: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param provision_data: Internal data for provisioning jobs.
:type provision_data: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param provision_data: Internal data for provisioning jobs.
:type provision_data: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param run_as: The user that ran the job.
:type run_as: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param run_as: The user that ran the job.
:type run_as: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param script_id: The internal identifier for the script.
:type script_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param script_id: The internal identifier for the script.
:type script_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param script_language: The language of the script.
:type script_language: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param script_language: The language of the script.
:type script_language: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param script_text: The content of the script.
:type script_text: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param script_text: The content of the script.
:type script_text: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param started_at: The time this job started execution.
:type started_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param started_at: The time this job started execution.
:type started_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param status: The current status of this job, based upon all of the statuses for each device in the job.
:type status: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param status: The current status of this job, based upon all of the statuses for each device in the job.
:type status: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param taskflow_revert: The internal workflow name for job reversion using the script.
:type taskflow_revert: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param taskflow_revert: The internal workflow name for job reversion using the script.
:type taskflow_revert: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param transactional_ind: Flag indicating if devices should be reserved during execution of this job.
:type transactional_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param transactional_ind: Flag indicating if devices should be reserved during execution of this job.
:type transactional_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param updated_at: The time when the job was updated.
:type updated_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The time when the job was updated.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of job methods. The listed methods will be called on each job returned and included in the output. Available methods are: meta.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: meta.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Job. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against jobs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: approval_note, approved_at, approved_by, category, completed_at, config_template_id, config_template_text, created_at, created_by, credential_source, description, id, input_data, job_specification_id, job_type, last_status_at, name, provision_data, run_as, script_id, script_language, script_text, started_at, status, taskflow_revert, transactional_ind, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return jobs: An array of the Job objects that match the specified input criteria.
:rtype jobs: Array of Job
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available jobs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: approval_note, approved_at, approved_by, category, completed_at, config_template_id, config_template_text, created_at, created_by, credential_source, description, id, input_data, job_specification_id, job_type, last_status_at, name, provision_data, run_as, script_id, script_language, script_text, started_at, status, taskflow_revert, transactional_ind, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_approval_note: The operator to apply to the field approval_note. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. approval_note: The approval note. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_approval_note: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_approval_note: If op_approval_note is specified, the field named in this input will be compared to the value in approval_note using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_approval_note must be specified if op_approval_note is specified.
:type val_f_approval_note: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_approval_note: If op_approval_note is specified, this value will be compared to the value in approval_note using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_approval_note must be specified if op_approval_note is specified.
:type val_c_approval_note: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_approved_at: The operator to apply to the field approved_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. approved_at: The time when the job was approved. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_approved_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_approved_at: If op_approved_at is specified, the field named in this input will be compared to the value in approved_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_approved_at must be specified if op_approved_at is specified.
:type val_f_approved_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_approved_at: If op_approved_at is specified, this value will be compared to the value in approved_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_approved_at must be specified if op_approved_at is specified.
:type val_c_approved_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_approved_by: The operator to apply to the field approved_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. approved_by: The user that approved the job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_approved_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_approved_by: If op_approved_by is specified, the field named in this input will be compared to the value in approved_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_approved_by must be specified if op_approved_by is specified.
:type val_f_approved_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_approved_by: If op_approved_by is specified, this value will be compared to the value in approved_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_approved_by must be specified if op_approved_by is specified.
:type val_c_approved_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_category: The operator to apply to the field category. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. category: The job category. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_category: If op_category is specified, the field named in this input will be compared to the value in category using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_category must be specified if op_category is specified.
:type val_f_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_category: If op_category is specified, this value will be compared to the value in category using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_category must be specified if op_category is specified.
:type val_c_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_completed_at: The operator to apply to the field completed_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. completed_at: The time this job completed execution against all devices. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_completed_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_completed_at: If op_completed_at is specified, the field named in this input will be compared to the value in completed_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_completed_at must be specified if op_completed_at is specified.
:type val_f_completed_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_completed_at: If op_completed_at is specified, this value will be compared to the value in completed_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_completed_at must be specified if op_completed_at is specified.
:type val_c_completed_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_config_template_id: The operator to apply to the field config_template_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. config_template_id: The internal identifier for the config template. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_config_template_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_config_template_id: If op_config_template_id is specified, the field named in this input will be compared to the value in config_template_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_config_template_id must be specified if op_config_template_id is specified.
:type val_f_config_template_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_config_template_id: If op_config_template_id is specified, this value will be compared to the value in config_template_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_config_template_id must be specified if op_config_template_id is specified.
:type val_c_config_template_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_config_template_text: The operator to apply to the field config_template_text. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. config_template_text: The content of the config template. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_config_template_text: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_config_template_text: If op_config_template_text is specified, the field named in this input will be compared to the value in config_template_text using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_config_template_text must be specified if op_config_template_text is specified.
:type val_f_config_template_text: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_config_template_text: If op_config_template_text is specified, this value will be compared to the value in config_template_text using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_config_template_text must be specified if op_config_template_text is specified.
:type val_c_config_template_text: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The time when the job was created. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_by: The operator to apply to the field created_by. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_by: The user that created the job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_by: If op_created_by is specified, the field named in this input will be compared to the value in created_by using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_by must be specified if op_created_by is specified.
:type val_f_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_by: If op_created_by is specified, this value will be compared to the value in created_by using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_by must be specified if op_created_by is specified.
:type val_c_created_by: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_credential_source: The operator to apply to the field credential_source. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. credential_source: The source of a device credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_credential_source: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_credential_source: If op_credential_source is specified, the field named in this input will be compared to the value in credential_source using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_credential_source must be specified if op_credential_source is specified.
:type val_f_credential_source: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_credential_source: If op_credential_source is specified, this value will be compared to the value in credential_source using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_credential_source must be specified if op_credential_source is specified.
:type val_c_credential_source: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: The description of the job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_input_data: The operator to apply to the field input_data. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. input_data: The input data for the job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_input_data: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_input_data: If op_input_data is specified, the field named in this input will be compared to the value in input_data using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_input_data must be specified if op_input_data is specified.
:type val_f_input_data: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_input_data: If op_input_data is specified, this value will be compared to the value in input_data using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_input_data must be specified if op_input_data is specified.
:type val_c_input_data: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_job_specification_id: The operator to apply to the field job_specification_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. job_specification_id: The internal identifier for the associated Job Specification. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_job_specification_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_job_specification_id: If op_job_specification_id is specified, the field named in this input will be compared to the value in job_specification_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_job_specification_id must be specified if op_job_specification_id is specified.
:type val_f_job_specification_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_job_specification_id: If op_job_specification_id is specified, this value will be compared to the value in job_specification_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_job_specification_id must be specified if op_job_specification_id is specified.
:type val_c_job_specification_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_job_type: The operator to apply to the field job_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. job_type: The type of job. This can be Scheduled, Ad Hoc, or Run Now. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_job_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_job_type: If op_job_type is specified, the field named in this input will be compared to the value in job_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_job_type must be specified if op_job_type is specified.
:type val_f_job_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_job_type: If op_job_type is specified, this value will be compared to the value in job_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_job_type must be specified if op_job_type is specified.
:type val_c_job_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_last_status_at: The operator to apply to the field last_status_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. last_status_at: The time when the job status was updated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_last_status_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_last_status_at: If op_last_status_at is specified, the field named in this input will be compared to the value in last_status_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_last_status_at must be specified if op_last_status_at is specified.
:type val_f_last_status_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_last_status_at: If op_last_status_at is specified, this value will be compared to the value in last_status_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_last_status_at must be specified if op_last_status_at is specified.
:type val_c_last_status_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: The name of this job, as entered when the job specification was defined or when the job was executed from a script. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified.
:type val_f_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified.
:type val_c_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_provision_data: The operator to apply to the field provision_data. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. provision_data: Internal data for provisioning jobs. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_provision_data: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_provision_data: If op_provision_data is specified, the field named in this input will be compared to the value in provision_data using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_provision_data must be specified if op_provision_data is specified.
:type val_f_provision_data: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_provision_data: If op_provision_data is specified, this value will be compared to the value in provision_data using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_provision_data must be specified if op_provision_data is specified.
:type val_c_provision_data: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_run_as: The operator to apply to the field run_as. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. run_as: The user that ran the job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_run_as: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_run_as: If op_run_as is specified, the field named in this input will be compared to the value in run_as using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_run_as must be specified if op_run_as is specified.
:type val_f_run_as: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_run_as: If op_run_as is specified, this value will be compared to the value in run_as using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_run_as must be specified if op_run_as is specified.
:type val_c_run_as: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_script_id: The operator to apply to the field script_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. script_id: The internal identifier for the script. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_script_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_script_id: If op_script_id is specified, the field named in this input will be compared to the value in script_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_script_id must be specified if op_script_id is specified.
:type val_f_script_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_script_id: If op_script_id is specified, this value will be compared to the value in script_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_script_id must be specified if op_script_id is specified.
:type val_c_script_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_script_language: The operator to apply to the field script_language. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. script_language: The language of the script. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_script_language: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_script_language: If op_script_language is specified, the field named in this input will be compared to the value in script_language using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_script_language must be specified if op_script_language is specified.
:type val_f_script_language: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_script_language: If op_script_language is specified, this value will be compared to the value in script_language using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_script_language must be specified if op_script_language is specified.
:type val_c_script_language: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_script_text: The operator to apply to the field script_text. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. script_text: The content of the script. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_script_text: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_script_text: If op_script_text is specified, the field named in this input will be compared to the value in script_text using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_script_text must be specified if op_script_text is specified.
:type val_f_script_text: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_script_text: If op_script_text is specified, this value will be compared to the value in script_text using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_script_text must be specified if op_script_text is specified.
:type val_c_script_text: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_started_at: The operator to apply to the field started_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. started_at: The time this job started execution. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_started_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_started_at: If op_started_at is specified, the field named in this input will be compared to the value in started_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_started_at must be specified if op_started_at is specified.
:type val_f_started_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_started_at: If op_started_at is specified, this value will be compared to the value in started_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_started_at must be specified if op_started_at is specified.
:type val_c_started_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_status: The operator to apply to the field status. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. status: The current status of this job, based upon all of the statuses for each device in the job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_status: If op_status is specified, the field named in this input will be compared to the value in status using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_status must be specified if op_status is specified.
:type val_f_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_status: If op_status is specified, this value will be compared to the value in status using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_status must be specified if op_status is specified.
:type val_c_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_taskflow_revert: The operator to apply to the field taskflow_revert. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. taskflow_revert: The internal workflow name for job reversion using the script. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_taskflow_revert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_taskflow_revert: If op_taskflow_revert is specified, the field named in this input will be compared to the value in taskflow_revert using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_taskflow_revert must be specified if op_taskflow_revert is specified.
:type val_f_taskflow_revert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_taskflow_revert: If op_taskflow_revert is specified, this value will be compared to the value in taskflow_revert using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_taskflow_revert must be specified if op_taskflow_revert is specified.
:type val_c_taskflow_revert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_transactional_ind: The operator to apply to the field transactional_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. transactional_ind: Flag indicating if devices should be reserved during execution of this job. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_transactional_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_transactional_ind: If op_transactional_ind is specified, the field named in this input will be compared to the value in transactional_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_transactional_ind must be specified if op_transactional_ind is specified.
:type val_f_transactional_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_transactional_ind: If op_transactional_ind is specified, this value will be compared to the value in transactional_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_transactional_ind must be specified if op_transactional_ind is specified.
:type val_c_transactional_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The time when the job was updated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of job methods. The listed methods will be called on each job returned and included in the output. Available methods are: meta.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: meta.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Job. Valid values are id, name, category, job_type, taskflow_revert, script_id, config_template_id, description, created_by, run_as, credential_source, approved_by, approval_note, provision_data, input_data, transactional_ind, status, last_status_at, started_at, completed_at, created_at, updated_at, approved_at, script_text, script_language, config_template_text, job_specification_id. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return jobs: An array of the Job objects that match the specified input criteria.
:rtype jobs: Array of Job
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def script(self, **kwargs):
"""Downloads the script that ran on each device in a job.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The job id to download the script file for.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return script: The contents of the script file that ran on each device in the job. It will be presented as type "application/octet-stream".
:rtype script: String
"""
return self.api_mixed_request(self._get_method_fullname("script"), kwargs)
def issues(self, **kwargs):
"""List any issues associated with the specified job.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param JobID: The id of the job to list.
:type JobID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return as the first record.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The maximum number of records to return.
:type limit: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return issue_details: An array of the IssueDetail objects that match the specified input criteria.
:rtype issue_details: Array of IssueDetail
"""
return self.api_list_request(self._get_method_fullname("issues"), kwargs)
def job_files(self, **kwargs):
"""Lists/downloads common files for a job. If no filename is given, a list of files for the job will be returned. If a filename is passed, and it exists, it will be downloaded as type "application/octet-stream".
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The job id to list files for.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param filename: An optional filename to download.
:type filename: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return filenames: An array of filenames that match the specified input criteria.
:rtype filenames: Array of String
"""
return self.api_mixed_request(self._get_method_fullname("job_files"), kwargs)
def job_archive(self, **kwargs):
"""Downloads zip archive of whole job or job process.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The job id to download the archive for.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param started_at: The date and time at which the job started.
:type started_at: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param process_id: The job process id to download the archive for.
:type process_id: Integer
**Outputs**
"""
return self.api_mixed_request(self._get_method_fullname("job_archive"), kwargs)
def log_message(self, **kwargs):
"""Logs a message to custom log and session log
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param JobID: The id of the job.
:type JobID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param process_id: The id of the job process.
:type process_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param severity: The severity of the message.
:type severity: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param message: A message describing the results.
:type message: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("log_message"), kwargs)
def log_custom_message(self, **kwargs):
"""Logs a message to custom log
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param JobID: The id of the job.
:type JobID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param JobDetailID: The id of the individual device job.
:type JobDetailID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param severity: The severity of the message.
:type severity: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param message: A message describing the results.
:type message: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("log_custom_message"), kwargs)
def cancel(self, **kwargs):
"""Cancel the execution of a job or a process
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the job to cancel.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param process_id: The id of the process to cancel.
:type process_id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("cancel"), kwargs)
|
ctb/cvxpy
|
refs/heads/master
|
cvxpy/atoms/lambda_sum_largest.py
|
12
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.expressions.expression import Expression
from cvxpy.expressions.variables import Semidef
from cvxpy.atoms.lambda_max import lambda_max
from cvxpy.atoms.affine.trace import trace
def lambda_sum_largest(X, k):
"""Sum of the largest k eigenvalues.
"""
X = Expression.cast_to_const(X)
if X.size[0] != X.size[1]:
raise ValueError("First argument must be a square matrix.")
elif int(k) != k or k <= 0:
raise ValueError("Second argument must be a positive integer.")
"""
S_k(X) denotes lambda_sum_largest(X, k).
t >= k*lambda_max(X - Z) + trace(Z), Z is PSD
implies
t >= k*s + trace(Z)
Z is PSD
s*I >= X - Z (in the PSD sense)
which implies
t >= k*s + trace(Z) >= S_k(s*I + Z) >= S_k(X)
We use the fact that
S_k(X) = sup_{sets of k orthonormal vectors u_i} \sum_i u_i^T X u_i
and if Z >= X in the PSD sense then
\sum_i u_i^T Z u_i >= \sum_i u_i^T X u_i
We have equality when s = lambda_k and Z is diagonal
with Z_{ii} = (lambda_i - lambda_k)_+
"""
Z = Semidef(X.size[0])
return k*lambda_max(X - Z) + trace(Z)
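# A minimal, illustrative check (an assumption-laden sketch, not part of the
# original module): minimizing the returned expression over its auxiliary
# Semidef variable recovers the sum of the k largest eigenvalues of a constant.
if __name__ == "__main__":
    import numpy
    from cvxpy import Minimize, Problem

    A = numpy.diag([4.0, 3.0, 2.0, 1.0])
    # The two largest eigenvalues sum to 7; the optimal value should be close.
    print(Problem(Minimize(lambda_sum_largest(A, 2))).solve())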
|
mrocklin/toolz
|
refs/heads/master
|
toolz/tests/test_serialization.py
|
9
|
from toolz import *
import pickle
def test_compose():
f = compose(str, sum)
g = pickle.loads(pickle.dumps(f))
assert f((1, 2)) == g((1, 2))
def test_curry():
f = curry(map)(str)
g = pickle.loads(pickle.dumps(f))
assert list(f((1, 2, 3))) == list(g((1, 2, 3)))
def test_juxt():
f = juxt(str, int, bool)
g = pickle.loads(pickle.dumps(f))
assert f(1) == g(1)
assert f.funcs == g.funcs
def test_complement():
f = complement(bool)
assert f(True) is False
assert f(False) is True
g = pickle.loads(pickle.dumps(f))
assert f(True) == g(True)
assert f(False) == g(False)
|
xarus01/clockwork
|
refs/heads/master
|
clockwork/wsgi.py
|
1
|
"""
WSGI config for clockwork project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clockwork.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
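# A typical way to serve this callable (an illustrative command, not part of the
# project): any WSGI server can point at clockwork.wsgi:application, e.g.
#
#     gunicorn clockwork.wsgi:application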
|
arante/pyloc
|
refs/heads/master
|
py2/cool-things/rfactorial.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# rfactorial.py
# Recursive implementation of factorial.
#
# Author: Billy Wilson Arante
# Created: 2016/06/20 PHT
from sys import argv
def factorial(number):
"""Recursive factorial"""
if number <= 1:
return 1
else:
return number * factorial(number - 1)
def test():
"""Driver function"""
for i in range(11):
print "%d! = %d" % (i, factorial(i))
def main():
"""Main"""
number = argv[1]
print factorial(int(number))
if __name__ == "__main__":
main()
|
raildo/python-keystoneclient
|
refs/heads/master
|
keystoneclient/tests/v3/utils.py
|
2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urlparse
import uuid
import httpretty
from keystoneclient.openstack.common import jsonutils
from keystoneclient.tests import utils
from keystoneclient.v3 import client
TestResponse = utils.TestResponse
def parameterize(ref):
"""Rewrites attributes to match the kwarg naming convention in client.
>>> parameterize({'project_id': 0})
{'project': 0}
"""
params = ref.copy()
for key in ref:
if key[-3:] == '_id':
params.setdefault(key[:-3], params.pop(key))
return params
class UnauthenticatedTestCase(utils.TestCase):
"""Class used as base for unauthenticated calls."""
TEST_ROOT_URL = 'http://127.0.0.1:5000/'
TEST_URL = '%s%s' % (TEST_ROOT_URL, 'v3')
TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
TEST_ADMIN_URL = '%s%s' % (TEST_ROOT_ADMIN_URL, 'v3')
class TestCase(UnauthenticatedTestCase):
TEST_SERVICE_CATALOG = [{
"endpoints": [{
"url": "http://cdn.admin-nets.local:8774/v1.0/",
"region": "RegionOne",
"interface": "public"
}, {
"url": "http://127.0.0.1:8774/v1.0",
"region": "RegionOne",
"interface": "internal"
}, {
"url": "http://cdn.admin-nets.local:8774/v1.0",
"region": "RegionOne",
"interface": "admin"
}],
"type": "nova_compat"
}, {
"endpoints": [{
"url": "http://nova/novapi/public",
"region": "RegionOne",
"interface": "public"
}, {
"url": "http://nova/novapi/internal",
"region": "RegionOne",
"interface": "internal"
}, {
"url": "http://nova/novapi/admin",
"region": "RegionOne",
"interface": "admin"
}],
"type": "compute"
}, {
"endpoints": [{
"url": "http://glance/glanceapi/public",
"region": "RegionOne",
"interface": "public"
}, {
"url": "http://glance/glanceapi/internal",
"region": "RegionOne",
"interface": "internal"
}, {
"url": "http://glance/glanceapi/admin",
"region": "RegionOne",
"interface": "admin"
}],
"type": "image",
"name": "glance"
}, {
"endpoints": [{
"url": "http://127.0.0.1:5000/v3",
"region": "RegionOne",
"interface": "public"
}, {
"url": "http://127.0.0.1:5000/v3",
"region": "RegionOne",
"interface": "internal"
}, {
"url": "http://127.0.0.1:35357/v3",
"region": "RegionOne",
"interface": "admin"
}],
"type": "identity"
}, {
"endpoints": [{
"url": "http://swift/swiftapi/public",
"region": "RegionOne",
"interface": "public"
}, {
"url": "http://swift/swiftapi/internal",
"region": "RegionOne",
"interface": "internal"
}, {
"url": "http://swift/swiftapi/admin",
"region": "RegionOne",
"interface": "admin"
}],
"type": "object-store"
}]
def setUp(self):
super(TestCase, self).setUp()
self.client = client.Client(username=self.TEST_USER,
token=self.TEST_TOKEN,
tenant_name=self.TEST_TENANT_NAME,
auth_url=self.TEST_URL,
endpoint=self.TEST_URL)
def stub_auth(self, subject_token=None, **kwargs):
if not subject_token:
subject_token = self.TEST_TOKEN
self.stub_url(httpretty.POST, ['auth', 'tokens'],
X_Subject_Token=subject_token, **kwargs)
class CrudTests(object):
key = None
collection_key = None
model = None
manager = None
path_prefix = None
def new_ref(self, **kwargs):
kwargs.setdefault('id', uuid.uuid4().hex)
return kwargs
def encode(self, entity):
if isinstance(entity, dict):
return {self.key: entity}
if isinstance(entity, list):
return {self.collection_key: entity}
raise NotImplementedError('Are you sure you want to encode that?')
def stub_entity(self, method, parts=None, entity=None, id=None, **kwargs):
if entity:
entity = self.encode(entity)
kwargs['json'] = entity
if not parts:
parts = [self.collection_key]
if self.path_prefix:
parts.insert(0, self.path_prefix)
if id:
if not parts:
parts = []
parts.append(id)
self.stub_url(method, parts=parts, **kwargs)
def assertEntityRequestBodyIs(self, entity):
self.assertRequestBodyIs(json=self.encode(entity))
@httpretty.activate
def test_create(self, ref=None, req_ref=None):
ref = ref or self.new_ref()
manager_ref = ref.copy()
manager_ref.pop('id')
# req_ref argument allows you to specify a different
# signature for the request when the manager does some
# conversion before doing the request (e.g. converting
# from datetime object to timestamp string)
req_ref = req_ref or ref.copy()
req_ref.pop('id')
self.stub_entity(httpretty.POST, entity=req_ref, status=201)
returned = self.manager.create(**parameterize(manager_ref))
self.assertTrue(isinstance(returned, self.model))
for attr in req_ref:
self.assertEqual(
getattr(returned, attr),
req_ref[attr],
'Expected different %s' % attr)
self.assertEntityRequestBodyIs(req_ref)
@httpretty.activate
def test_get(self, ref=None):
ref = ref or self.new_ref()
self.stub_entity(httpretty.GET, id=ref['id'], entity=ref)
returned = self.manager.get(ref['id'])
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
@httpretty.activate
def test_list(self, ref_list=None, expected_path=None, **filter_kwargs):
ref_list = ref_list or [self.new_ref(), self.new_ref()]
if not expected_path:
if self.path_prefix:
expected_path = 'v3/%s/%s' % (self.path_prefix,
self.collection_key)
else:
expected_path = 'v3/%s' % self.collection_key
httpretty.register_uri(httpretty.GET,
urlparse.urljoin(self.TEST_URL, expected_path),
body=jsonutils.dumps(self.encode(ref_list)))
returned_list = self.manager.list(**filter_kwargs)
self.assertTrue(len(returned_list))
[self.assertTrue(isinstance(r, self.model)) for r in returned_list]
@httpretty.activate
def test_find(self, ref=None):
ref = ref or self.new_ref()
ref_list = [ref]
self.stub_entity(httpretty.GET, entity=ref_list)
returned = self.manager.find(name=getattr(ref, 'name', None))
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
if hasattr(ref, 'name'):
self.assertQueryStringIs({'name': ref['name']})
else:
self.assertQueryStringIs({})
@httpretty.activate
def test_update(self, ref=None):
ref = ref or self.new_ref()
self.stub_entity(httpretty.PATCH, id=ref['id'], entity=ref)
req_ref = ref.copy()
req_ref.pop('id')
returned = self.manager.update(ref['id'], **parameterize(req_ref))
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
self.assertEntityRequestBodyIs(req_ref)
@httpretty.activate
def test_delete(self, ref=None):
ref = ref or self.new_ref()
self.stub_entity(httpretty.DELETE, id=ref['id'], status=204)
self.manager.delete(ref['id'])
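# Illustrative sketch (names below are assumptions, not from this module): a
# concrete test case mixes CrudTests into TestCase and fills in the class
# attributes that the stubs and assertions above rely on, e.g.
#
#     class ProjectTests(TestCase, CrudTests):
#         def setUp(self):
#             super(ProjectTests, self).setUp()
#             self.key = 'project'
#             self.collection_key = 'projects'
#             self.model = projects.Project
#             self.manager = self.client.projects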
class QuotaCrudTests(object):
key = None
collection_key = None
model = None
manager = None
path_prefix = None
complement = None
def new_ref(self, **kwargs):
kwargs.setdefault('id', uuid.uuid4().hex)
return kwargs
def encode(self, entity):
if isinstance(entity, dict):
return {self.key: entity}
if isinstance(entity, list):
return {self.collection_key: entity}
raise NotImplementedError('Are you sure you want to encode that?')
def stub_entity(self, method, parts=None, entity=None, id=None, **kwargs):
if entity:
entity = self.encode(entity)
kwargs['json'] = entity
if not parts:
parts = [self.collection_key]
if self.path_prefix:
parts.insert(0, self.path_prefix)
if id:
if not parts:
parts = []
parts.append(id)
self.stub_url(method, parts=parts, **kwargs)
def assertEntityRequestBodyIs(self, entity):
self.assertRequestBodyIs(json=self.encode(entity))
@httpretty.activate
def test_get(self, ref=None):
ref = ref or self.new_ref()
self.stub_entity(httpretty.GET, id=ref['id'], entity=ref)
returned = self.manager.get(ref['id'])
self.assertTrue(isinstance(returned, self.model))
for attr in ref:
self.assertEqual(
getattr(returned, attr),
ref[attr],
'Expected different %s' % attr)
|
emilsvennesson/kodi-viaplay
|
refs/heads/master
|
__init__.py
|
9
|
# dummy file to init the directory
|
darkwing/kuma
|
refs/heads/master
|
vendor/packages/logilab/astng/builder.py
|
25
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""The ASTNGBuilder makes astng from living object and / or from _ast
The builder is not thread safe and can't be used to parse different sources
at the same time.
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import splitext, basename, exists, abspath
from logilab.common.modutils import modpath_from_file
from logilab.astng.exceptions import ASTNGBuildingException, InferenceError
from logilab.astng.raw_building import InspectBuilder
from logilab.astng.rebuilder import TreeRebuilder
from logilab.astng.manager import ASTNGManager
from logilab.astng.bases import YES, Instance
from _ast import PyCF_ONLY_AST
def parse(string):
return compile(string, "<string>", 'exec', PyCF_ONLY_AST)
if sys.version_info >= (3, 0):
from tokenize import detect_encoding
def open_source_file(filename):
byte_stream = open(filename, 'bU')
encoding = detect_encoding(byte_stream.readline)[0]
stream = open(filename, 'U', encoding=encoding)
try:
data = stream.read()
except UnicodeError, uex: # wrong encoding
# detect_encoding returns utf-8 if no encoding specified
msg = 'Wrong (%s) or no encoding specified' % encoding
raise ASTNGBuildingException(msg)
return stream, encoding, data
else:
import re
_ENCODING_RGX = re.compile("\s*#+.*coding[:=]\s*([-\w.]+)")
def _guess_encoding(string):
"""get encoding from a python file as string or return None if not found
"""
# check for UTF-8 byte-order mark
if string.startswith('\xef\xbb\xbf'):
return 'UTF-8'
for line in string.split('\n', 2)[:2]:
# check for encoding declaration
match = _ENCODING_RGX.match(line)
if match is not None:
return match.group(1)
def open_source_file(filename):
"""get data for parsing a file"""
stream = open(filename, 'U')
data = stream.read()
encoding = _guess_encoding(data)
return stream, encoding, data
# ast NG builder ##############################################################
MANAGER = ASTNGManager()
class ASTNGBuilder(InspectBuilder):
"""provide astng building methods"""
rebuilder = TreeRebuilder()
def __init__(self, manager=None):
InspectBuilder.__init__(self)
self._manager = manager or MANAGER
def module_build(self, module, modname=None):
"""build an astng from a living module instance
"""
node = None
path = getattr(module, '__file__', None)
if path is not None:
path_, ext = splitext(module.__file__)
if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
node = self.file_build(path_ + '.py', modname)
if node is None:
# this is a built-in module
# get a partial representation by introspection
node = self.inspect_build(module, modname=modname, path=path)
return node
def file_build(self, path, modname=None):
"""build astng from a source code file (i.e. from an ast)
path is expected to be a python source file
"""
try:
stream, encoding, data = open_source_file(path)
except IOError, exc:
msg = 'Unable to load file %r (%s)' % (path, exc)
raise ASTNGBuildingException(msg)
except SyntaxError, exc: # py3k encoding specification error
raise ASTNGBuildingException(exc)
except LookupError, exc: # unknown encoding
raise ASTNGBuildingException(exc)
# get module name if necessary
if modname is None:
try:
modname = '.'.join(modpath_from_file(path))
except ImportError:
modname = splitext(basename(path))[0]
# build astng representation
node = self.string_build(data, modname, path)
node.file_encoding = encoding
return node
def string_build(self, data, modname='', path=None):
"""build astng from source code string and return rebuilded astng"""
module = self._data_build(data, modname, path)
self._manager.astng_cache[module.name] = module
# post tree building steps after we stored the module in the cache:
for from_node in module._from_nodes:
self.add_from_names_to_locals(from_node)
# handle delayed assattr nodes
for delayed in module._delayed_assattr:
self.delayed_assattr(delayed)
if modname:
for transformer in self._manager.transformers:
transformer(module)
return module
def _data_build(self, data, modname, path):
"""build tree node from data and add some informations"""
# this method could be wrapped with a pickle/cache function
node = parse(data + '\n')
if path is not None:
node_file = abspath(path)
else:
node_file = '<?>'
if modname.endswith('.__init__'):
modname = modname[:-9]
package = True
else:
package = path and path.find('__init__.py') > -1 or False
self.rebuilder.init()
module = self.rebuilder.visit_module(node, modname, package)
module.file = module.path = node_file
module._from_nodes = self.rebuilder._from_nodes
module._delayed_assattr = self.rebuilder._delayed_assattr
return module
def add_from_names_to_locals(self, node):
"""store imported names to the locals;
resort the locals if coming from a delayed node
"""
_key_func = lambda node: node.fromlineno
def sort_locals(my_list):
my_list.sort(key=_key_func)
for (name, asname) in node.names:
if name == '*':
try:
imported = node.root().import_module(node.modname)
except ASTNGBuildingException:
continue
for name in imported.wildcard_import_names():
node.parent.set_local(name, node)
sort_locals(node.parent.scope().locals[name])
else:
node.parent.set_local(asname or name, node)
sort_locals(node.parent.scope().locals[asname or name])
def delayed_assattr(self, node):
"""visit a AssAttr node -> add name to locals, handle members
definition
"""
try:
frame = node.frame()
for infered in node.expr.infer():
if infered is YES:
continue
try:
if infered.__class__ is Instance:
infered = infered._proxied
iattrs = infered.instance_attrs
elif isinstance(infered, Instance):
# Const, Tuple, ... we may be wrong, may be not, but
# anyway we don't want to pollute builtin's namespace
continue
elif infered.is_function:
iattrs = infered.instance_attrs
else:
iattrs = infered.locals
except AttributeError:
# XXX log error
#import traceback
#traceback.print_exc()
continue
values = iattrs.setdefault(node.attrname, [])
if node in values:
continue
# get assign in __init__ first XXX useful ?
if frame.name == '__init__' and values and not \
values[0].frame().name == '__init__':
values.insert(0, node)
else:
values.append(node)
except InferenceError:
pass
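# Illustrative sketch (an assumption, not part of logilab-astng): building an
# astng tree from a source string and inspecting its top-level names.
if __name__ == "__main__":
    builder = ASTNGBuilder()
    tree = builder.string_build("x = 1\ndef f():\n    return x\n", modname="snippet")
    print sorted(tree.locals.keys())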
|
xuweiliang/Codelibrary
|
refs/heads/master
|
nova/policies/extended_status.py
|
6
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-extended-status'
POLICY_ROOT = 'os_compute_api:os-extended-status:%s'
extended_status_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_OR_OWNER),
]
def list_rules():
return extended_status_policies
|
apache/incubator-airflow
|
refs/heads/master
|
tests/ti_deps/deps/test_trigger_rule_dep.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from unittest.mock import Mock
from airflow import settings
from airflow.models import DAG, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.dummy import DummyOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from tests.models import DEFAULT_DATE
from tests.test_utils.db import clear_db_runs
class TestTriggerRuleDep(unittest.TestCase):
def _get_task_instance(self, trigger_rule=TriggerRule.ALL_SUCCESS, state=None, upstream_task_ids=None):
task = BaseOperator(task_id='test_task', trigger_rule=trigger_rule, start_date=datetime(2015, 1, 1))
if upstream_task_ids:
task._upstream_task_ids.update(upstream_task_ids)
return TaskInstance(task=task, state=state, execution_date=task.start_date)
def test_no_upstream_tasks(self):
"""
If the TI has no upstream TIs then there is nothing to check and the dep is passed
"""
ti = self._get_task_instance(TriggerRule.ALL_DONE, State.UP_FOR_RETRY)
self.assertTrue(TriggerRuleDep().is_met(ti=ti))
def test_dummy_tr(self):
"""
The dummy trigger rule should always pass this dep
"""
ti = self._get_task_instance(TriggerRule.DUMMY, State.UP_FOR_RETRY)
self.assertTrue(TriggerRuleDep().is_met(ti=ti))
def test_one_success_tr_success(self):
"""
One-success trigger rule success
"""
ti = self._get_task_instance(TriggerRule.ONE_SUCCESS, State.UP_FOR_RETRY)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=2,
failed=2,
upstream_failed=2,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_one_success_tr_failure(self):
"""
One-success trigger rule failure
"""
ti = self._get_task_instance(TriggerRule.ONE_SUCCESS)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=2,
failed=2,
upstream_failed=2,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_one_failure_tr_failure(self):
"""
One-failure trigger rule failure
"""
ti = self._get_task_instance(TriggerRule.ONE_FAILED)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=2,
skipped=0,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_one_failure_tr_success(self):
"""
One-failure trigger rule success
"""
ti = self._get_task_instance(TriggerRule.ONE_FAILED)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=2,
failed=2,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=2,
failed=0,
upstream_failed=2,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_all_success_tr_success(self):
"""
All-success trigger rule success
"""
ti = self._get_task_instance(TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID"])
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=0,
failed=0,
upstream_failed=0,
done=1,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_all_success_tr_failure(self):
"""
All-success trigger rule failure
"""
ti = self._get_task_instance(
TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=0,
failed=1,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_all_success_tr_skip(self):
"""
All-success trigger rule fails when some upstream tasks are skipped.
"""
ti = self._get_task_instance(
TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_all_success_tr_skip_flag_upstream(self):
"""
All-success trigger rule fails when some upstream tasks are skipped. The state of the ti
should be set to SKIPPED when flag_upstream_failed is True.
"""
ti = self._get_task_instance(
TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=True,
session=Mock(),
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
self.assertEqual(ti.state, State.SKIPPED)
def test_none_failed_tr_success(self):
"""
All success including skip trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.NONE_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_none_failed_tr_skipped(self):
"""
All success including all upstream skips trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.NONE_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=2,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=True,
session=Mock(),
)
)
self.assertEqual(len(dep_statuses), 0)
self.assertEqual(ti.state, State.NONE)
def test_none_failed_tr_failure(self):
"""
All success including skip trigger rule failure
"""
ti = self._get_task_instance(
TriggerRule.NONE_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID", "FailedFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=1,
upstream_failed=0,
done=3,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_none_failed_or_skipped_tr_success(self):
"""
All success including skip trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.NONE_FAILED_OR_SKIPPED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_none_failed_or_skipped_tr_skipped(self):
"""
All success including all upstream skips trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.NONE_FAILED_OR_SKIPPED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=2,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=True,
session=Mock(),
)
)
self.assertEqual(len(dep_statuses), 0)
self.assertEqual(ti.state, State.SKIPPED)
def test_none_failed_or_skipped_tr_failure(self):
"""
All success including skip trigger rule failure
"""
ti = self._get_task_instance(
TriggerRule.NONE_FAILED_OR_SKIPPED,
upstream_task_ids=["FakeTaskID", "OtherFakeTaskID", "FailedFakeTaskID"],
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=1,
upstream_failed=0,
done=3,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_all_failed_tr_success(self):
"""
All-failed trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.ALL_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=0,
failed=2,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_all_failed_tr_failure(self):
"""
All-failed trigger rule failure
"""
ti = self._get_task_instance(
TriggerRule.ALL_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=2,
skipped=0,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_all_done_tr_success(self):
"""
All-done trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.ALL_DONE, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=2,
skipped=0,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 0)
def test_all_done_tr_failure(self):
"""
All-done trigger rule failure
"""
ti = self._get_task_instance(
TriggerRule.ALL_DONE, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
)
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=0,
failed=0,
upstream_failed=0,
done=1,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_none_skipped_tr_success(self):
"""
None-skipped trigger rule success
"""
ti = self._get_task_instance(
TriggerRule.NONE_SKIPPED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID", "FailedFakeTaskID"]
)
with create_session() as session:
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=2,
skipped=0,
failed=1,
upstream_failed=0,
done=3,
flag_upstream_failed=False,
session=session,
)
)
self.assertEqual(len(dep_statuses), 0)
# with `flag_upstream_failed` set to True
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=0,
failed=3,
upstream_failed=0,
done=3,
flag_upstream_failed=True,
session=session,
)
)
self.assertEqual(len(dep_statuses), 0)
def test_none_skipped_tr_failure(self):
"""
None-skipped trigger rule failure
"""
ti = self._get_task_instance(
TriggerRule.NONE_SKIPPED, upstream_task_ids=["FakeTaskID", "SkippedTaskID"]
)
with create_session() as session:
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=False,
session=session,
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
# with `flag_upstream_failed` set to True
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=1,
failed=0,
upstream_failed=0,
done=2,
flag_upstream_failed=True,
session=session,
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
# Fail until all upstream tasks have completed execution
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=0,
skipped=0,
failed=0,
upstream_failed=0,
done=0,
flag_upstream_failed=False,
session=session,
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_unknown_tr(self):
"""
Unknown trigger rules should cause this dep to fail
"""
ti = self._get_task_instance()
ti.task.trigger_rule = "Unknown Trigger Rule"
dep_statuses = tuple(
TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=1,
skipped=0,
failed=0,
upstream_failed=0,
done=1,
flag_upstream_failed=False,
session="Fake Session",
)
)
self.assertEqual(len(dep_statuses), 1)
self.assertFalse(dep_statuses[0].passed)
def test_get_states_count_upstream_ti(self):
"""
Tests the helper function '_get_states_count_upstream_ti' both as a unit and inside update_state.
"""
from airflow.ti_deps.dep_context import DepContext
get_states_count_upstream_ti = TriggerRuleDep._get_states_count_upstream_ti
session = settings.Session()
now = timezone.utcnow()
dag = DAG('test_dagrun_with_pre_tis', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E', trigger_rule=TriggerRule.ONE_FAILED)
op1.set_downstream([op2, op3]) # op1 >> op2, op3
op4.set_upstream([op3, op2]) # op3, op2 >> op4
op5.set_upstream([op2, op3, op4]) # (op2, op3, op4) >> op5
clear_db_runs()
dag.clear()
dr = dag.create_dagrun(
run_id='test_dagrun_with_pre_tis', state=State.RUNNING, execution_date=now, start_date=now
)
ti_op1 = TaskInstance(task=dag.get_task(op1.task_id), execution_date=dr.execution_date)
ti_op2 = TaskInstance(task=dag.get_task(op2.task_id), execution_date=dr.execution_date)
ti_op3 = TaskInstance(task=dag.get_task(op3.task_id), execution_date=dr.execution_date)
ti_op4 = TaskInstance(task=dag.get_task(op4.task_id), execution_date=dr.execution_date)
ti_op5 = TaskInstance(task=dag.get_task(op5.task_id), execution_date=dr.execution_date)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2.set_state(state=State.FAILED, session=session)
ti_op3.set_state(state=State.SUCCESS, session=session)
ti_op4.set_state(state=State.SUCCESS, session=session)
ti_op5.set_state(state=State.SUCCESS, session=session)
session.commit()
# check handling with cases that tasks are triggered from backfill with no finished tasks
finished_tasks = DepContext().ensure_finished_tasks(ti_op2.task.dag, ti_op2.execution_date, session)
self.assertEqual(
get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op2), (1, 0, 0, 0, 1)
)
finished_tasks = dr.get_task_instances(state=State.finished, session=session)
self.assertEqual(
get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op4), (1, 0, 1, 0, 2)
)
self.assertEqual(
get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op5), (2, 0, 1, 0, 3)
)
dr.update_state()
self.assertEqual(State.SUCCESS, dr.state)
|
uw-it-aca/myuw
|
refs/heads/master
|
myuw/test/api/test_resources.py
|
1
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
from django.urls import reverse
from myuw.dao.user import get_user_model
from myuw.models import ResourceCategoryPin
from myuw.test import get_request_with_user
from myuw.test.api import MyuwApiTest
class TestResources(MyuwApiTest):
def test_get_resources_list(self):
self.set_user('javerage')
url = reverse('myuw_resources_api')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(len(data), 9)
self.assertEqual(data[1]["category_id"],
"emailandaccountsandidentity")
self.assertEqual(data[2]["category_id"],
"servicesforfacultyandstaff")
def test_pin_resource(self):
self.set_user('bill')
url = reverse('myuw_resources_pin_api',
kwargs={'category_id': 'teachingtools'})
response = self.client.post(
url, content_type='application/json')
self.assertEqual(response.status_code, 200)
categories = ResourceCategoryPin.objects.all()
self.assertEqual(len(categories), 1)
self.assertEqual(categories[0].resource_category_id, 'teachingtools')
response = self.client.delete(
url, content_type='application/json')
self.assertEqual(response.status_code, 200)
categories = ResourceCategoryPin.objects.all()
self.assertEqual(len(categories), 0)
def test_disable_action(self):
with self.settings(DEBUG=False,
MYUW_DISABLE_ACTIONS_WHEN_OVERRIDE=True):
self.set_user('javerage')
self.set_userservice_override('bill')
url = reverse('myuw_resources_pin_api',
kwargs={'category_id': 'teachinginclasstools'})
response = self.client.post(
url, content_type='application/json')
self.assertEqual(response.status_code, 403)
url = reverse('myuw_resources_pin_api',
kwargs={'category_id': 'teachinginclasstools'})
response = self.client.delete(
url, content_type='application/json')
self.assertEqual(response.status_code, 403)
|
deepsrijit1105/edx-platform
|
refs/heads/master
|
lms/djangoapps/course_blocks/api.py
|
12
|
"""
API entry point to the course_blocks app with top-level
get_course_blocks function.
"""
from openedx.core.djangoapps.content.block_structure.api import get_block_structure_manager
from openedx.core.lib.block_structure.transformers import BlockStructureTransformers
from .transformers import (
library_content,
start_date,
user_partitions,
visibility,
)
from .usage_info import CourseUsageInfo
# Default list of transformers for manipulating course block structures
# based on the user's access to the course blocks.
COURSE_BLOCK_ACCESS_TRANSFORMERS = [
library_content.ContentLibraryTransformer(),
start_date.StartDateTransformer(),
user_partitions.UserPartitionTransformer(),
visibility.VisibilityTransformer(),
]
def get_course_blocks(
user,
starting_block_usage_key,
transformers=None,
collected_block_structure=None,
):
"""
A higher order function implemented on top of the
block_structure.get_blocks function returning a transformed block
structure for the given user starting at starting_block_usage_key.
Arguments:
user (django.contrib.auth.models.User) - User object for
which the block structure is to be transformed.
starting_block_usage_key (UsageKey) - Specifies the starting block
of the block structure that is to be transformed.
transformers (BlockStructureTransformers) - A collection of
transformers whose transform methods are to be called.
If None, COURSE_BLOCK_ACCESS_TRANSFORMERS is used.
collected_block_structure (BlockStructureBlockData) - A
block structure retrieved from a prior call to
BlockStructureManager.get_collected. Can be optionally
provided if already available, for optimization.
Returns:
BlockStructureBlockData - A transformed block structure,
starting at starting_block_usage_key, that has undergone the
transform methods for the given user and the course
associated with the block structure. If using the default
transformers, the transformed block structure will be
exactly equivalent to the blocks that the given user has
access to.
"""
if not transformers:
transformers = BlockStructureTransformers(COURSE_BLOCK_ACCESS_TRANSFORMERS)
transformers.usage_info = CourseUsageInfo(starting_block_usage_key.course_key, user)
return get_block_structure_manager(starting_block_usage_key.course_key).get_transformed(
transformers,
starting_block_usage_key,
collected_block_structure,
)
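# A minimal usage sketch (the user, course id and field access below are
# assumptions for illustration, not taken from this module):
#
#     from opaque_keys.edx.keys import CourseKey
#     from xmodule.modulestore.django import modulestore
#
#     course_key = CourseKey.from_string('course-v1:edX+DemoX+Demo_Course')
#     course = modulestore().get_course(course_key)
#     blocks = get_course_blocks(request.user, course.location)
#     for block_key in blocks:
#         print(blocks.get_xblock_field(block_key, 'display_name'))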
|
vabue/RatticWeb
|
refs/heads/master
|
cred/migrations/0026_auto.py
|
7
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Cred', fields ['title']
db.create_index('cred_cred', ['title'])
# Adding index on 'Cred', fields ['is_deleted']
db.create_index('cred_cred', ['is_deleted'])
# Adding index on 'Tag', fields ['name']
db.create_index('cred_tag', ['name'])
def backwards(self, orm):
# Removing index on 'Tag', fields ['name']
db.delete_index('cred_tag', ['name'])
# Removing index on 'Cred', fields ['is_deleted']
db.delete_index('cred_cred', ['is_deleted'])
# Removing index on 'Cred', fields ['title']
db.delete_index('cred_cred', ['title'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cred.cred': {
'Meta': {'object_name': 'Cred'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'descriptionmarkdown': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'iconname': ('django.db.models.fields.CharField', [], {'default': "'Key.png'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'latest': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'history'", 'null': 'True', 'to': "orm['cred.Cred']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': "orm['cred.Tag']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'cred.credaudit': {
'Meta': {'ordering': "('-time',)", 'object_name': 'CredAudit'},
'audittype': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credlogs'", 'to': "orm['auth.User']"})
},
'cred.credchangeq': {
'Meta': {'object_name': 'CredChangeQ'},
'cred': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cred.Cred']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cred.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'})
}
}
complete_apps = ['cred']
|
CKboss/VirtualJudgePY
|
refs/heads/master
|
Handlers/CrawlerOnlineHandler.py
|
1
|
import tornado.web
import tornado.gen
import json
import time
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from Handlers.BaseHandler import BaseHandler
from Config.ParametersConfig import MID_THREAD_POOL_SIZE
from Crawler.BnuVJCrawler.BnuVJCrawler import BnuVJCrawler
from Crawler.HustCrawler.HustCrawler import HustCrawler
from UIModule.MsgModule import renderMSG
class CrawlerOnlineHandler(BaseHandler):
executor = ThreadPoolExecutor(MID_THREAD_POOL_SIZE)
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
self.render('crawleronline.html')
@tornado.web.asynchronous
@tornado.gen.engine
def post(self):
if self.request.files is not None and len(self.request.files.keys())!=0:
csv_meta = self.request.files
content = csv_meta['csv'][0]['body'].decode()
problemlist = content.split('\n')
if len(problemlist) > 20 :
                self.write(renderMSG('Too many problems to crawl'))
self.finish()
else :
nt = yield self.CrawlerCSV(problemlist)
                self.write(renderMSG('{} problems crawled successfully'.format(nt)))
self.finish()
return
vj = self.get_argument('VJ',None)
oj = self.get_argument('oj',None)
prob = self.get_argument('prob',None)
if oj is None or vj is None or prob is None or oj == 'ALL' :
self.finish()
return
isok = yield self.CrawlerIt(vj,oj,prob)
        msg = renderMSG('Crawl succeeded! Visit <a href="/problem/{}/{}">here</a> to enjoy it!'.format(oj, prob), waittime=1000000)
self.write(msg)
self.finish()
@run_on_executor
def CrawlerIt(self,vj,oj,prob):
if vj == 'BNUVJ':
if oj == 'ZOJ' : oj = 'ZJU'
bvc = BnuVJCrawler()
bvc.CrawlerProblem(originOJ=oj,originProb=prob)
elif vj == 'HUST':
huc = HustCrawler()
huc.CrawlerProblem(oj,prob)
time.sleep(3)
return True
@run_on_executor
def CrawlerCSV(self,problemlist):
nt=0
for line in problemlist :
item = line.split(',')
if len(item) == 2 :
oj = item[0]
prob = item[1]
if self.CrawlerIt('HUST',oj,prob) : nt+=1
time.sleep(2)
return nt
|
Ecpy/ecpy
|
refs/heads/master
|
tests/app/errors/__init__.py
|
803
|
# -*- coding: utf-8 -*-
|
Vladkryvoruchko/PSPNet-Keras-tensorflow
|
refs/heads/master
|
caffe-tensorflow/kaffe/graph.py
|
9
|
from google.protobuf import text_format
from .caffe import get_caffe_resolver
from .errors import KaffeError, print_stderr
from .layers import LayerAdapter, LayerType, NodeKind, NodeDispatch
from .shapes import TensorShape
class Node(object):
def __init__(self, name, kind, layer=None):
self.name = name
self.kind = kind
self.layer = LayerAdapter(layer, kind) if layer else None
self.parents = []
self.children = []
self.data = None
self.output_shape = None
self.metadata = {}
def add_parent(self, parent_node):
assert parent_node not in self.parents
self.parents.append(parent_node)
if self not in parent_node.children:
parent_node.children.append(self)
def add_child(self, child_node):
assert child_node not in self.children
self.children.append(child_node)
if self not in child_node.parents:
child_node.parents.append(self)
def get_only_parent(self):
if len(self.parents) != 1:
raise KaffeError('Node (%s) expected to have 1 parent. Found %s.' %
(self, len(self.parents)))
return self.parents[0]
@property
def parameters(self):
if self.layer is not None:
return self.layer.parameters
return None
def __str__(self):
return '[%s] %s' % (self.kind, self.name)
def __repr__(self):
return '%s (0x%x)' % (self.name, id(self))
class Graph(object):
def __init__(self, nodes=None, name=None):
self.nodes = nodes or []
self.node_lut = {node.name: node for node in self.nodes}
self.name = name
def add_node(self, node):
self.nodes.append(node)
self.node_lut[node.name] = node
def get_node(self, name):
try:
return self.node_lut[name]
except KeyError:
raise KaffeError('Layer not found: %s' % name)
def get_input_nodes(self):
return [node for node in self.nodes if len(node.parents) == 0]
def get_output_nodes(self):
return [node for node in self.nodes if len(node.children) == 0]
def topologically_sorted(self):
sorted_nodes = []
unsorted_nodes = list(self.nodes)
temp_marked = set()
perm_marked = set()
def visit(node):
if node in temp_marked:
raise KaffeError('Graph is not a DAG.')
if node in perm_marked:
return
temp_marked.add(node)
for child in node.children:
visit(child)
perm_marked.add(node)
temp_marked.remove(node)
sorted_nodes.insert(0, node)
while len(unsorted_nodes):
visit(unsorted_nodes.pop())
return sorted_nodes
def compute_output_shapes(self):
sorted_nodes = self.topologically_sorted()
for node in sorted_nodes:
node.output_shape = TensorShape(*NodeKind.compute_output_shape(node))
def replaced(self, new_nodes):
return Graph(nodes=new_nodes, name=self.name)
def transformed(self, transformers):
graph = self
for transformer in transformers:
graph = transformer(graph)
if graph is None:
raise KaffeError('Transformer failed: {}'.format(transformer))
assert isinstance(graph, Graph)
return graph
def __contains__(self, key):
return key in self.node_lut
def __str__(self):
hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param', 'Output')
s = [hdr, '-' * 94]
for node in self.topologically_sorted():
# If the node has learned parameters, display the first one's shape.
# In case of convolutions, this corresponds to the weights.
data_shape = node.data[0].shape if node.data else '--'
out_shape = node.output_shape or '--'
s.append('{:<20} {:<30} {:>20} {:>20}'.format(node.kind, node.name, data_shape,
tuple(out_shape)))
return '\n'.join(s)
class GraphBuilder(object):
'''Constructs a model graph from a Caffe protocol buffer definition.'''
def __init__(self, def_path, phase='test'):
'''
def_path: Path to the model definition (.prototxt)
phase: Either 'test' or 'train'. Used for filtering phase-specific nodes.
'''
self.def_path = def_path
self.phase = phase
self.load()
def load(self):
'''Load the layer definitions from the prototxt.'''
self.params = get_caffe_resolver().NetParameter()
with open(self.def_path, 'rb') as def_file:
text_format.Merge(def_file.read(), self.params)
def filter_layers(self, layers):
'''Filter out layers based on the current phase.'''
phase_map = {0: 'train', 1: 'test'}
filtered_layer_names = set()
filtered_layers = []
for layer in layers:
phase = self.phase
if len(layer.include):
phase = phase_map[layer.include[0].phase]
if len(layer.exclude):
phase = phase_map[1 - layer.include[0].phase]
exclude = (phase != self.phase)
# Dropout layers appear in a fair number of Caffe
# test-time networks. These are just ignored. We'll
# filter them out here.
if (not exclude) and (phase == 'test'):
exclude = (layer.type == LayerType.Dropout)
if not exclude:
filtered_layers.append(layer)
# Guard against dupes.
assert layer.name not in filtered_layer_names
filtered_layer_names.add(layer.name)
return filtered_layers
def make_node(self, layer):
'''Create a graph node for the given layer.'''
kind = NodeKind.map_raw_kind(layer.type)
if kind is None:
raise KaffeError('Unknown layer type encountered: %s' % layer.type)
# We want to use the layer's top names (the "output" names), rather than the
        # name attribute, which is more of a readability thing than a functional one.
# Other layers will refer to a node by its "top name".
return Node(layer.name, kind, layer=layer)
def make_input_nodes(self):
'''
Create data input nodes.
This method is for old-style inputs, where the input specification
        was not treated as a first-class layer in the prototxt.
Newer models use the "Input layer" type.
'''
nodes = [Node(name, NodeKind.Data) for name in self.params.input]
if len(nodes):
input_dim = map(int, self.params.input_dim)
if not input_dim:
if len(self.params.input_shape) > 0:
input_dim = map(int, self.params.input_shape[0].dim)
else:
raise KaffeError('Dimensions for input not specified.')
for node in nodes:
node.output_shape = tuple(input_dim)
return nodes
def build(self):
'''
Builds the graph from the Caffe layer definitions.
'''
# Get the layers
layers = self.params.layers or self.params.layer
# Filter out phase-excluded layers
layers = self.filter_layers(layers)
# Get any separately-specified input layers
nodes = self.make_input_nodes()
nodes += [self.make_node(layer) for layer in layers]
# Initialize the graph
graph = Graph(nodes=nodes, name=self.params.name)
# Connect the nodes
#
# A note on layers and outputs:
# In Caffe, each layer can produce multiple outputs ("tops") from a set of inputs
# ("bottoms"). The bottoms refer to other layers' tops. The top can rewrite a bottom
# (in case of in-place operations). Note that the layer's name is not used for establishing
# any connectivity. It's only used for data association. By convention, a layer with a
# single top will often use the same name (although this is not required).
#
# The current implementation only supports single-output nodes (note that a node can still
# have multiple children, since multiple child nodes can refer to the single top's name).
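        # For illustration, a hypothetical prototxt fragment (not taken from any specific model):
        #     layer { name: "conv1" type: "Convolution" bottom: "data"  top: "conv1" }
        #     layer { name: "relu1" type: "ReLU"        bottom: "conv1" top: "conv1" }
        # Here "relu1" computes in place: its top rewrites "conv1", which is the situation
        # handled as Case 1 below.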
node_outputs = {}
for layer in layers:
node = graph.get_node(layer.name)
for input_name in layer.bottom:
assert input_name != layer.name
parent_node = node_outputs.get(input_name)
if (parent_node is None) or (parent_node == node):
parent_node = graph.get_node(input_name)
node.add_parent(parent_node)
if len(layer.top)>1:
raise KaffeError('Multiple top nodes are not supported.')
for output_name in layer.top:
if output_name == layer.name:
# Output is named the same as the node. No further action required.
continue
# There are two possibilities here:
#
# Case 1: output_name refers to another node in the graph.
# This is an "in-place operation" that overwrites an existing node.
# This would create a cycle in the graph. We'll undo the in-placing
# by substituting this node wherever the overwritten node is referenced.
#
# Case 2: output_name violates the convention layer.name == output_name.
                #         Since we are working in the single-output regime, we can simply rename it to
# match the layer name.
#
                # In both cases, future references to this top are re-routed to this node.
node_outputs[output_name] = node
graph.compute_output_shapes()
return graph
class NodeMapper(NodeDispatch):
def __init__(self, graph):
self.graph = graph
def map(self):
nodes = self.graph.topologically_sorted()
# Remove input nodes - we'll handle them separately.
input_nodes = self.graph.get_input_nodes()
nodes = [t for t in nodes if t not in input_nodes]
# Decompose DAG into chains.
chains = []
for node in nodes:
attach_to_chain = None
if len(node.parents) == 1:
parent = node.get_only_parent()
for chain in chains:
if chain[-1] == parent:
# Node is part of an existing chain.
attach_to_chain = chain
break
if attach_to_chain is None:
# Start a new chain for this node.
attach_to_chain = []
chains.append(attach_to_chain)
attach_to_chain.append(node)
# Map each chain.
mapped_chains = []
for chain in chains:
mapped_chains.append(self.map_chain(chain))
return self.commit(mapped_chains)
def map_chain(self, chain):
return [self.map_node(node) for node in chain]
def map_node(self, node):
map_func = self.get_handler(node.kind, 'map')
mapped_node = map_func(node)
assert mapped_node is not None
mapped_node.node = node
return mapped_node
def commit(self, mapped_chains):
raise NotImplementedError('Must be implemented by subclass.')
|
mtlchun/edx
|
refs/heads/master
|
pavelib/utils/test/suites/acceptance_suite.py
|
10
|
"""
Acceptance test suite
"""
from paver.easy import sh, call_task
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites import TestSuite
from pavelib.utils.envs import Env
__test__ = False # do not collect
class AcceptanceTest(TestSuite):
"""
A class for running lettuce acceptance tests.
"""
def __init__(self, *args, **kwargs):
super(AcceptanceTest, self).__init__(*args, **kwargs)
self.report_dir = Env.REPORT_DIR / 'acceptance'
self.fasttest = kwargs.get('fasttest', False)
self.system = kwargs.get('system')
self.default_store = kwargs.get('default_store')
self.extra_args = kwargs.get('extra_args', '')
def __enter__(self):
super(AcceptanceTest, self).__enter__()
self.report_dir.makedirs_p()
self._update_assets()
def __exit__(self, exc_type, exc_value, traceback):
super(AcceptanceTest, self).__exit__(exc_type, exc_value, traceback)
test_utils.clean_mongo()
@property
def cmd(self):
report_file = self.report_dir / "{}.xml".format(self.system)
report_args = "--with-xunit --xunit-file {}".format(report_file)
cmd = (
"DEFAULT_STORE={default_store} ./manage.py {system} --settings acceptance harvest --traceback "
"--debug-mode --verbosity {verbosity} {report_args} {extra_args}".format(
default_store=self.default_store,
system=self.system,
verbosity=self.verbosity,
report_args=report_args,
extra_args=self.extra_args,
)
)
return cmd
def _update_assets(self):
args = [self.system, '--settings=acceptance']
if self.fasttest:
args.append('--skip-collect')
call_task('pavelib.assets.update_assets', args=args)
class AcceptanceTestSuite(TestSuite):
"""
    A class for running the full set of lettuce acceptance test suites, one per system and default store.
"""
def __init__(self, *args, **kwargs):
super(AcceptanceTestSuite, self).__init__(*args, **kwargs)
self.root = 'acceptance'
self.db = Env.REPO_ROOT / 'test_root/db/test_edx.db'
self.db_cache = Env.REPO_ROOT / 'common/test/db_cache/lettuce.db'
self.fasttest = kwargs.get('fasttest', False)
if kwargs.get('system'):
systems = [kwargs['system']]
else:
systems = ['lms', 'cms']
if kwargs.get('default_store'):
stores = [kwargs['default_store']]
else:
# TODO fix Acceptance tests with Split (LMS-11300)
# stores = ['split', 'draft']
stores = ['draft']
self.subsuites = []
for system in systems:
for default_store in stores:
kwargs['system'] = system
kwargs['default_store'] = default_store
self.subsuites.append(AcceptanceTest('{} acceptance using {}'.format(system, default_store), **kwargs))
def __enter__(self):
super(AcceptanceTestSuite, self).__enter__()
if not self.skip_clean:
test_utils.clean_test_files()
if not self.fasttest:
self._setup_acceptance_db()
def _setup_acceptance_db(self):
"""
TODO: Improve the following
Since the CMS depends on the existence of some database tables
that are now in common but used to be in LMS (Role/Permissions for Forums)
we need to create/migrate the database tables defined in the LMS.
We might be able to address this by moving out the migrations from
lms/django_comment_client, but then we'd have to repair all the existing
migrations from the upgrade tables in the DB.
But for now for either system (lms or cms), use the lms
definitions to sync and migrate.
"""
if self.db.isfile():
            # Since we are using SQLite, we can reset the database by deleting it on disk.
self.db.remove()
if self.db_cache.isfile():
# To speed up migrations, we check for a cached database file and start from that.
# The cached database file should be checked into the repo
# Copy the cached database to the test root directory
sh("cp {db_cache} {db}".format(db_cache=self.db_cache, db=self.db))
# Run migrations to update the db, starting from its cached state
sh("./manage.py lms --settings acceptance migrate --traceback --noinput")
sh("./manage.py cms --settings acceptance migrate --traceback --noinput")
else:
# If no cached database exists, syncdb before migrating, then create the cache
sh("./manage.py lms --settings acceptance syncdb --traceback --noinput")
sh("./manage.py cms --settings acceptance syncdb --traceback --noinput")
sh("./manage.py lms --settings acceptance migrate --traceback --noinput")
sh("./manage.py cms --settings acceptance migrate --traceback --noinput")
# Create the cache if it doesn't already exist
sh("cp {db} {db_cache}".format(db_cache=self.db_cache, db=self.db))
|
40223210/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/errno.py
|
624
|
"""
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
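For example (exact numbers and messages vary by platform; Linux-style values assumed):
    >>> import errno, os
    >>> errno.errorcode[errno.ENOENT]
    'ENOENT'
    >>> os.strerror(errno.ENOENT)
    'No such file or directory'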
"""
errorcode= {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
EPERM=1
ENOENT=2
ESRCH=3
EINTR=4
EIO=5
ENXIO=6
E2BIG=7
ENOEXEC=8
EBADF=9
ECHILD=10
EAGAIN=11
ENOMEM=12
EACCES=13
EFAULT=14
ENOTBLK=15
EBUSY=16
EEXIST=17
EXDEV=18
ENODEV=19
ENOTDIR=20
EISDIR=21
EINVAL=22
ENFILE=23
EMFILE=24
ENOTTY=25
ETXTBSY=26
EFBIG=27
ENOSPC=28
ESPIPE=29
EROFS=30
EMLINK=31
EPIPE=32
EDOM=33
ERANGE=34
EDEADLOCK=35
ENAMETOOLONG=36
ENOLCK=37
ENOSYS=38
ENOTEMPTY=39
ELOOP=40
ENOMSG=42
EIDRM=43
ECHRNG=44
EL2NSYNC=45
EL3HLT=46
EL3RST=47
ELNRNG=48
EUNATCH=49
ENOCSI=50
EL2HLT=51
EBADE=52
EBADR=53
EXFULL=54
ENOANO=55
EBADRQC=56
EBADSLT=57
EBFONT=59
ENOSTR=60
ENODATA=61
ETIME=62
ENOSR=63
ENONET=64
ENOPKG=65
EREMOTE=66
ENOLINK=67
EADV=68
ESRMNT=69
ECOMM=70
EPROTO=71
EMULTIHOP=72
EDOTDOT=73
EBADMSG=74
EOVERFLOW=75
ENOTUNIQ=76
EBADFD=77
EREMCHG=78
ELIBACC=79
ELIBBAD=80
ELIBSCN=81
ELIBMAX=82
ELIBEXEC=83
EILSEQ=84
ERESTART=85
ESTRPIPE=86
EUSERS=87
ENOTSOCK=88
EDESTADDRREQ=89
EMSGSIZE=90
EPROTOTYPE=91
ENOPROTOOPT=92
EPROTONOSUPPORT=93
ESOCKTNOSUPPORT=94
ENOTSUP=95
EPFNOSUPPORT=96
EAFNOSUPPORT=97
EADDRINUSE=98
EADDRNOTAVAIL=99
ENETDOWN=100
ENETUNREACH=101
ENETRESET=102
ECONNABORTED=103
ECONNRESET=104
ENOBUFS=105
EISCONN=106
ENOTCONN=107
ESHUTDOWN=108
ETOOMANYREFS=109
ETIMEDOUT=110
ECONNREFUSED=111
EHOSTDOWN=112
EHOSTUNREACH=113
EALREADY=114
EINPROGRESS=115
ESTALE=116
EUCLEAN=117
ENOTNAM=118
ENAVAIL=119
EISNAM=120
EREMOTEIO=121
EDQUOT=122
ENOMEDIUM=123
EMEDIUMTYPE=124
ECANCELED=125
ENOKEY=126
EKEYEXPIRED=127
EKEYREVOKED=128
EKEYREJECTED=129
EOWNERDEAD=130
ENOTRECOVERABLE=131
ERFKILL=132
|
ujenmr/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_log_syslogd_setting.py
|
23
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if the relevant
# setting is enabled in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd_setting
short_description: Global settings for remote syslog server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
      user to set and modify the log_syslogd feature and setting category.
      Examples include all parameters; values need to be adjusted to your datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_syslogd_setting:
description:
- Global settings for remote syslog server.
default: null
suboptions:
certificate:
description:
- Certificate used to communicate with Syslog server. Source certificate.local.name.
custom-field-name:
description:
- Custom field name for CEF format logging.
suboptions:
custom:
description:
- Field custom name.
id:
description:
- Entry ID.
required: true
name:
description:
- Field name.
enc-algorithm:
description:
- Enable/disable reliable syslogging with TLS encryption.
choices:
- high-medium
- high
- low
- disable
facility:
description:
- Remote syslog facility.
choices:
- kernel
- user
- mail
- daemon
- auth
- syslog
- lpr
- news
- uucp
- cron
- authpriv
- ftp
- ntp
- audit
- alert
- clock
- local0
- local1
- local2
- local3
- local4
- local5
- local6
- local7
format:
description:
- Log format.
choices:
- default
- csv
- cef
mode:
description:
- Remote syslog logging over UDP/Reliable TCP.
choices:
- udp
- legacy-reliable
- reliable
port:
description:
- Server listen port.
server:
description:
- Address of remote syslog server.
source-ip:
description:
- Source IP address of syslog.
status:
description:
- Enable/disable remote syslog logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Global settings for remote syslog server.
fortios_log_syslogd_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd_setting:
certificate: "<your_own_value> (source certificate.local.name)"
custom-field-name:
-
custom: "<your_own_value>"
id: "6"
name: "default_name_7"
enc-algorithm: "high-medium"
facility: "kernel"
format: "default"
mode: "udp"
port: "12"
server: "192.168.100.40"
source-ip: "84.230.14.43"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_syslogd_setting_data(json):
option_list = ['certificate', 'custom-field-name', 'enc-algorithm',
'facility', 'format', 'mode',
'port', 'server', 'source-ip',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def log_syslogd_setting(data, fos):
vdom = data['vdom']
log_syslogd_setting_data = data['log_syslogd_setting']
flattened_data = flatten_multilists_attributes(log_syslogd_setting_data)
filtered_data = filter_log_syslogd_setting_data(flattened_data)
return fos.set('log.syslogd',
'setting',
data=filtered_data,
vdom=vdom)
def fortios_log_syslogd(data, fos):
login(data)
if data['log_syslogd_setting']:
resp = log_syslogd_setting(data, fos)
fos.logout()
    return resp['status'] != "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_syslogd_setting": {
"required": False, "type": "dict",
"options": {
"certificate": {"required": False, "type": "str"},
"custom-field-name": {"required": False, "type": "list",
"options": {
"custom": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"enc-algorithm": {"required": False, "type": "str",
"choices": ["high-medium", "high", "low",
"disable"]},
"facility": {"required": False, "type": "str",
"choices": ["kernel", "user", "mail",
"daemon", "auth", "syslog",
"lpr", "news", "uucp",
"cron", "authpriv", "ftp",
"ntp", "audit", "alert",
"clock", "local0", "local1",
"local2", "local3", "local4",
"local5", "local6", "local7"]},
"format": {"required": False, "type": "str",
"choices": ["default", "csv", "cef"]},
"mode": {"required": False, "type": "str",
"choices": ["udp", "legacy-reliable", "reliable"]},
"port": {"required": False, "type": "int"},
"server": {"required": False, "type": "str"},
"source-ip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
jopohl/urh
|
refs/heads/master
|
tests/data/code.py
|
1
|
#!/usr/bin/env python3
import os
import sys
from subprocess import call
cur_dir = os.path.dirname(os.path.realpath(__file__))
if sys.argv[1] == "e":
call(sys.executable + ' "' + os.path.join(cur_dir, "encode.py") + '"' + " " + sys.argv[2], shell=True)
elif sys.argv[1] == "d":
call(sys.executable + ' "' + os.path.join(cur_dir, "decode.py") + '"' + " " + sys.argv[2], shell=True)
else:
print("Unknown")
|
intagger/paraproxio
|
refs/heads/master
|
paraproxio.py
|
1
|
#!/usr/bin/env python
"""
Paraproxio is an HTTP proxy with parallel downloading of big files.
"""
# Copyright (C) 2016 Alexander Logger <intagger@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = '1.0'
PARAPROXIO_VERSION = "Paraproxio/" + __version__
import sys
req_ver = (3, 5)
if sys.version_info < req_ver:
    print('Minimum Python/{0[0]}.{0[1]} required. You are running this script with Python/{1[0]}.{1[1]}.'.format(
req_ver, sys.version_info),
file=sys.stderr)
exit(1)
import asyncio
import logging
import os
import shutil
import argparse
import concurrent.futures
import json
import traceback
import socket
import http.server
import threading
from time import time
from asyncio import AbstractEventLoop, Future, ensure_future, wait, wait_for, TimeoutError
from asyncio.futures import CancelledError
from typing import Tuple, Callable, Optional, List, Set, Dict, Any
from urllib.parse import urlparse, ParseResult
from math import ceil
from html import escape as html_escape
try:
import aiohttp
import aiohttp.hdrs as hdrs
import aiohttp.server
from aiohttp.multidict import CIMultiDictProxy
from aiohttp.protocol import RawRequestMessage
from aiohttp.streams import EmptyStreamReader
except ImportError as err:
print(
"Required module '{0}' not found. Try to run 'pip install {0}' to install it.".format(err.name),
file=sys.stderr)
exit(1)
if hasattr(socket, 'SO_KEEPALIVE'):
def tcp_keepalive(server, transport):
sock = transport.get_extra_info('socket')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
else:
def tcp_keepalive(server, transport): # pragma: no cover
pass
DEFAULT_ERROR_MESSAGE = """
<html>
<head>
<title>{status} {reason}</title>
</head>
<body>
<h1>{status} {reason}</h1>
{message}
</body>
</html>"""
EMPTY_PAYLOAD = aiohttp.streams.EmptyStreamReader()
RESPONSES = http.server.BaseHTTPRequestHandler.responses
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 8880
DEFAULT_CHUNK_DOWNLOAD_TIMEOUT = 10
DEFAULT_CHUNK_SIZE = 64 * 1024
DEFAULT_PARALLELS = 32
DEFAULT_MAX_WORKERS = 8
DEFAULT_PART_SIZE = DEFAULT_CHUNK_SIZE * DEFAULT_PARALLELS * DEFAULT_MAX_WORKERS
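# With the defaults above, a part works out to 64 KiB * 32 * 8 = 16 MiB.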
DEFAULT_WORKING_DIR = '.paraproxio'
DEFAULT_BUFFER_DIR = os.path.join(DEFAULT_WORKING_DIR, 'buffer')
DEFAULT_CACHE_DIR = os.path.join(DEFAULT_WORKING_DIR, 'cache')
DEFAULT_LOGS_DIR = os.path.join(DEFAULT_WORKING_DIR, 'logs')
DEFAULT_SERVER_LOG_FILENAME = 'paraproxio.server.log'
DEFAULT_ACCESS_LOG_FILENAME = 'paraproxio.access.log'
DEFAULT_PARACCESS_LOG_FORMAT = '%a %l %u %t "%r" %s %b "%{Referrer}i" "%{User-Agent}i" %{X-Parallels}o'
PARALLELS_HEADER = 'X-Parallels' # Used in responses. Value: number of parallel downloads used.
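# For instance, a response assembled from four parallel range downloads would carry an
# "X-Parallels: 4" header (illustrative value; the actual number depends on the download).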
server_logger = logging.getLogger('paraproxio.server')
access_logger = logging.getLogger('paraproxio.access')
NOT_STARTED = 'NOT STARTED'
DOWNLOADING = 'DOWNLOADING'
DOWNLOADED = 'DOWNLOADED'
CANCELLED = 'CANCELLED'
_DOWNLOADER_STATES = {NOT_STARTED, DOWNLOADING, DOWNLOADED, CANCELLED}
files_to_parallel = ['.iso', '.zip', '.rpm', '.gz']
CACHE_INFO_FILE_NAME = 'info.json'
CACHE_BIN_FILE_NAME = 'file.bin'
CACHE_LAST_ACCESS_FILE_NAME = 'last-access'
DEFAULT_CACHE_MAX_AGE = 60 * 60 * 24 * 3 # 3 days in seconds
DEFAULT_CACHE_CLEANUP_INTERVAL = 60 * 10 # clean-up every 10 minutes
def need_file_to_parallel(url: str) -> bool:
pr = urlparse(url) # type: ParseResult
url, ext = os.path.splitext(pr.path)
return ext.lower() in files_to_parallel
def get_bytes_ranges_by_parts(length: int, parts: int) -> List[Tuple[int, int]]:
""" Get bytes ranges """
###################################################################################################
#
# length = 89
# parts = 5
# range_size = length // parts = 89 // 5 = 17
# last_range_size = range_size + length % parts = 17 + 89 % 5 = 17 + 4 = 21
#
# [<--range_size--->|<--range_size--->|<--range_size--->|<--range_size--->|<--last_range_size--->|
# [*****************|*****************|*****************|*****************|*****************|****]
# 0 17 34 51 68 85 89
#
###################################################################################################
range_size = length // parts
last_range_size = range_size + length % parts
last_range_idx = parts - 1
bytes_ranges = []
for part in range(0, last_range_idx):
bytes_range = (part * range_size, ((part + 1) * range_size) - 1)
bytes_ranges.append(bytes_range)
last_range_offset = last_range_idx * range_size
bytes_ranges.append((last_range_offset, last_range_offset + last_range_size - 1))
return bytes_ranges
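# A quick worked example of the scheme pictured above:
#     get_bytes_ranges_by_parts(89, 5) -> [(0, 16), (17, 33), (34, 50), (51, 67), (68, 88)]
# i.e. four ranges of 17 bytes plus a final range of 21 bytes, covering all 89 bytes.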
def get_bytes_ranges_by_part_size(length: int, part_size: int) -> List[Tuple[int, int]]:
bytes_ranges = []
for offset in range(0, length, part_size):
left = length - offset
if left < part_size:
part_size = left
bytes_ranges.append((offset, offset + part_size - 1))
return bytes_ranges
def get_unique_name():
return str(time()).replace('.', '_')
class RangeDownloader:
def __init__(
self,
url: str,
bytes_range: Tuple[int, int],
buffer_file_path,
*,
loop: AbstractEventLoop = None,
server_logger=server_logger,
chunk_size=DEFAULT_CHUNK_SIZE,
chunk_download_timeout=DEFAULT_CHUNK_DOWNLOAD_TIMEOUT):
self._url = url
self._bytes_range = bytes_range
self._length = bytes_range[1] - bytes_range[0] + 1
self._buffer_file_path = buffer_file_path
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._server_logger = server_logger
self._chunk_size = chunk_size
self._chunk_download_timeout = chunk_download_timeout
self._headers = {'Range': 'bytes={0[0]!s}-{0[1]!s}'.format(self._bytes_range)}
self._bytes_downloaded = 0
self._state = NOT_STARTED
@property
def state(self) -> str:
return self._state
@property
def bytes_downloaded(self):
return self._bytes_downloaded
@property
def buffer_file_path(self):
return self._buffer_file_path
async def download(self) -> str:
if self._state != NOT_STARTED:
return self._state
# Prepare an empty buffer file.
await self._loop.run_in_executor(None, self._create_buffer_file)
try:
# Create client session for downloading a file part from a host.
async with aiohttp.ClientSession(loop=self._loop, headers=self._headers) as session:
# Request a host for a file part.
async with session.request('GET', self._url) as res: # type: aiohttp.ClientResponse
if res.status != 206:
raise WrongResponseError('Expected status code 206, but {!s} ({!s}) received.',
res.status,
res.reason)
hrh = res.headers # type: CIMultiDictProxy
# TODO: check headers.
# Read content by chunks and write to the buffer file.
if self._state == NOT_STARTED:
self._state = DOWNLOADING
while self._state is DOWNLOADING:
with aiohttp.Timeout(self._chunk_download_timeout, loop=self._loop):
chunk = await res.content.read(self._chunk_size)
self._bytes_downloaded += len(chunk)
self._debug("Read ({!s} bytes). Downloaded: {!s} of {!s} bytes. [{:.2%}]".format(
len(chunk), self._bytes_downloaded, self._length,
self._bytes_downloaded / self._length))
if not chunk:
self._state = DOWNLOADED
break
await self._write_chunk(chunk)
await self._flush_and_release()
if self._state != DOWNLOADED:
res.close() # Close the response if not downloaded.
except aiohttp.ServerDisconnectedError as exc:
self._debug('Server disconnected error: {!r}.'.format(exc))
self.cancel()
except WrongResponseError as exc:
self._debug('Wrong response error: {!r}.'.format(exc))
self.cancel()
except asyncio.TimeoutError:
self._debug('Timeout.')
self.cancel()
except Exception as exc:
self._debug('Unexpected exception: {!r}.'.format(exc))
self.cancel()
finally:
return self._state
def cancel(self):
self._debug('Cancel called.')
self._state = CANCELLED
async def _write_chunk(self, chunk):
await self._run_nonblocking(lambda: self._buffer_file.write(chunk))
def _flush_and_release(self):
def flush_and_release():
if not self._buffer_file:
return
self._buffer_file.flush()
self._buffer_file.close()
del self._buffer_file
return self._run_nonblocking(flush_and_release)
def _run_nonblocking(self, func):
return self._loop.run_in_executor(None, lambda: func())
def _create_buffer_file(self):
f = open(self._buffer_file_path, 'xb')
f.seek(self._length - 1)
f.write(b'0')
f.flush()
f.seek(0)
self._buffer_file = f
def _debug(self, msg, *args, **kwargs):
msg = "{!r} {!s}".format(self, msg)
self._server_logger.debug(msg, *args, **kwargs)
def __repr__(self, *args, **kwargs):
return '<RangeDownloader ({2!s}): [{0[0]!s}-{0[1]!s}] {1!r}>'.format(self._bytes_range, self._url, self._state)
class ParallelDownloader:
"""Parallel downloader"""
_state = NOT_STARTED
_downloaders = [] # type: List[RangeDownloader]
_downloads = set() # type: Set[Future]
_filename = None # type: str
_next_id = 0
def __init__(
self,
url: str,
file_length: int,
*,
parallels: int = DEFAULT_PARALLELS,
part_size: int = DEFAULT_PART_SIZE,
chunk_size: int = DEFAULT_CHUNK_SIZE,
loop: AbstractEventLoop = None,
server_logger=server_logger,
buffer_dir: str = DEFAULT_BUFFER_DIR):
assert parallels > 1
self._url = url
self._file_length = file_length
self._part_size = part_size
self._parallels = parallels
self._chunk_size = chunk_size
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._server_logger = server_logger
self._download_dir = os.path.join(buffer_dir, get_unique_name())
self._create_download_dir()
# Calculate bytes ranges.
self._bytes_ranges = get_bytes_ranges_by_part_size(self._file_length, self._part_size)
self._parts = len(self._bytes_ranges)
if self._parts < self._parallels:
self._bytes_ranges = get_bytes_ranges_by_parts(self._file_length, self._parallels)
self._parts = len(self._bytes_ranges)
        # Re-initialize per-instance state; without this, all instances would share the
        # mutable defaults declared at class level above.
        self._downloaders = []  # type: List[RangeDownloader]
        self._downloads = set()  # type: Set[Future]
        self._next_id = 0
        self._state_condition = asyncio.Condition(loop=self._loop)
@property
def state(self) -> str:
return self._state
@property
def downloaders(self):
return self._downloaders
async def download(self):
if self._state == DOWNLOADING:
return
self._state = DOWNLOADING
# Create a downloader for each bytes range.
for i, bytes_range in enumerate(self._bytes_ranges):
filename = '{idx:03}_{range[0]!s}-{range[1]!s}.tmp'.format(idx=i, range=bytes_range)
buffer_file_path = os.path.join(self._download_dir, filename)
self._downloaders.append(
RangeDownloader(self._url,
bytes_range,
buffer_file_path,
loop=self._loop,
chunk_size=self._chunk_size))
        # Start the first downloader on its own so the first part reaches the client quickly.
self._start_next_downloaders(1)
# Waiting for all downloads to complete.
try:
while self._state is DOWNLOADING and self._downloads:
done, self._downloads = await wait(self._downloads, loop=self._loop,
return_when=asyncio.FIRST_COMPLETED)
for dd in done: # type: Future
                    # Cancel downloading if any completed download did not finish successfully.
if dd.result() is not DOWNLOADED:
raise CancelledError()
self._start_next_downloaders()
# Notify all readers.
async with self._state_condition:
self._state_condition.notify_all()
except Exception as ex:
self._debug('Download failed. Error: {!r}.'.format(ex))
self.cancel()
# Notify all readers.
async with self._state_condition:
self._state_condition.notify_all()
raise DownloadError(ex)
else:
# OK. All done.
self._state = DOWNLOADED
    def _start_next_downloaders(self, n=None):
        """Start next downloaders if needed, according to the parallels count."""
if not n:
n = self._parallels - len(self._downloads)
while n > 0 and self._next_id < self._parts:
downloader = self._downloaders[self._next_id]
self._next_id += 1
self._downloads.add(ensure_future(downloader.download(), loop=self._loop))
n -= 1
async def read(self, callback: Callable[[bytearray], Any]):
try:
for downloader in self._downloaders:
                # Wait until the downloader reaches a downloaded/cancelled state.
async with self._state_condition:
while downloader.state not in (DOWNLOADED, CANCELLED):
await self._state_condition.wait()
if downloader.state != DOWNLOADED:
self._debug('Downloader not in `DOWNLOADED` state, but in `{!s}`.'.format(downloader.state))
raise CancelledError()
                # Open the file and send all of its bytes back.
await read_from_file_by_chunks(downloader.buffer_file_path, callback, self._chunk_size,
lambda: self._state != CANCELLED, loop=self._loop)
except Exception as exc:
raise ReadError(exc)
def cancel(self):
if self._state != DOWNLOADING:
return
self._state = CANCELLED
for downloader in self._downloaders: # type: RangeDownloader
downloader.cancel()
async def clear(self):
if self._state not in (DOWNLOADED, CANCELLED):
return
await self._clear()
async def _clear(self):
self._downloaders.clear()
await self._run_nonblocking(lambda: shutil.rmtree(self._download_dir, ignore_errors=True))
async def _run_nonblocking(self, func):
return await self._loop.run_in_executor(None, lambda: func())
def _create_download_dir(self):
if not os.path.exists(self._download_dir):
os.makedirs(self._download_dir, exist_ok=True)
def _debug(self, msg, *args, **kwargs):
msg = "{!r} {!s}".format(self, msg)
self._server_logger.debug(msg, *args, **kwargs)
def __repr__(self, *args, **kwargs):
return '<ParallelDownloader: {!r}>'.format(self._url)
INITIALIZING = 'INITIALIZING'
READY = 'READY'
def get_cache_bin_file_path(cache_entry_dir: str):
return os.path.join(cache_entry_dir, CACHE_BIN_FILE_NAME)
def get_cache_info_file_path(cache_entry_dir: str):
return os.path.join(cache_entry_dir, CACHE_INFO_FILE_NAME)
def get_cache_last_access_file_path(cache_entry_dir: str):
return os.path.join(cache_entry_dir, CACHE_LAST_ACCESS_FILE_NAME)
def create_new_cache_entry_dir(cache_dir: str):
cache_entry_dir = os.path.join(cache_dir, get_unique_name())
os.makedirs(cache_entry_dir, exist_ok=True)
return cache_entry_dir
async def read_from_file_by_chunks(
file_path: str,
callback: Callable[[bytearray], None],
chunk_size: int = DEFAULT_CHUNK_SIZE,
condition: Callable[[], bool] = lambda: True,
*,
loop):
chunk = bytearray(chunk_size)
with open(file_path, 'rb') as f:
while condition():
r = await loop.run_in_executor(None, lambda: f.readinto(chunk))
if not r:
break
if r < chunk_size:
callback(memoryview(chunk)[:r].tobytes())
else:
callback(chunk)
class CachingDownloader:
"""Downloader with caching."""
_state = INITIALIZING
_downloadings = {} # type: Dict[str, Tuple[ParallelDownloader, Future, Future]]
_uploadings = set() # type: Set[(str, Future)]
_cache = {} # type: Dict[str, str]
_last_access_file_lock = threading.Lock()
def __init__(self,
cache_dir: str,
parallels,
part_size,
chunk_size,
loop,
server_logger=server_logger):
self._cache_dir = cache_dir
self._parallels = parallels
self._part_size = part_size
self._chunk_size = chunk_size
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._server_logger = server_logger
        # Re-initialize per-instance state; the dict/set defaults declared at class level
        # above would otherwise be shared between instances.
        self._downloadings = {}
        self._uploadings = set()
        self._cache = {}
        self._state_condition = asyncio.Condition(loop=self._loop)
ensure_future(self._init_cache(), loop=self._loop)
async def download(self, url: str, head: CIMultiDictProxy, callback: Callable[[bytearray], None]) -> str:
await self._when_state(READY)
if not await self._upload_from_cache(url, head, callback):
await self._upload_with_pd(url, head, callback)
async def _upload_from_cache(self, url: str, head: CIMultiDictProxy, callback: Callable[[bytearray], None]) -> bool:
"""Upload from cache"""
cache_entry_dir = self._cache.get(url)
if not cache_entry_dir:
return False
try:
cache_info = await self._load_cache_info(cache_entry_dir)
except CacheError as exc:
self._debug('Bad cache entry deleted: {!r}. Error: {!r}.'.format(cache_entry_dir, exc))
self._delete_cache_entry(cache_entry_dir)
return False
up_to_date = head.get(hdrs.LAST_MODIFIED) == cache_info.get(hdrs.LAST_MODIFIED)
up_to_date &= head.get(hdrs.ETAG) == cache_info.get(hdrs.ETAG)
if not up_to_date:
self._debug('Outdated cache for: {!r} deleted.'.format(url))
await self._delete_cache_entry(cache_entry_dir)
return False
self._debug('Uploading (from cache): {!r}.'.format(url))
await self._update_last_access(cache_entry_dir)
cache_bin_file_path = get_cache_bin_file_path(cache_entry_dir)
        # Open the file and send all of its bytes back.
coro_read = read_from_file_by_chunks(cache_bin_file_path, callback, self._chunk_size,
lambda: self._state != CANCELLED, loop=self._loop)
uploading = ensure_future(coro_read, loop=self._loop)
self._uploadings.add((url, uploading))
try:
await uploading
except ReadError:
self._debug('Read error.')
pass
except Exception as exc:
self._debug('Uploading failed with exception: {!r}.'.format(exc))
raise
finally:
self._uploadings.remove((url, uploading))
return True
async def _upload_with_pd(self, url, head: CIMultiDictProxy, callback: Callable[[bytearray], None]):
"""Upload using parallel downloader."""
content_length = head.get(hdrs.CONTENT_LENGTH)
assert content_length is not None
content_length = int(content_length)
self._debug('Uploading (from parallel downloader): {!r}.'.format(url))
        # Start a new downloading task or join an existing one.
dl = self._downloadings.get(url)
if dl:
pd, downloading, caching = dl
else:
def downloading_done(_):
self._debug('Downloading {!r} done.'.format(url))
assert url in self._downloadings
del self._downloadings[url]
# Create parallel downloader.
pd = ParallelDownloader(url, content_length,
parallels=self._parallels,
part_size=self._part_size,
chunk_size=self._chunk_size,
loop=self._loop)
# Start downloading.
downloading = ensure_future(pd.download(), loop=self._loop)
downloading.add_done_callback(downloading_done)
# Write downloading content to a cache entry.
async def cache():
cache_entry_dir = create_new_cache_entry_dir(self._cache_dir)
try:
cache_bin_file_path = get_cache_bin_file_path(cache_entry_dir)
with open(cache_bin_file_path, 'xb') as cache_bin_file:
await pd.read(lambda chunk: cache_bin_file.write(chunk))
await self._update_last_access(cache_entry_dir)
cache_info = {
'URL': url,
hdrs.CONTENT_LENGTH: content_length,
hdrs.LAST_MODIFIED: head.get(hdrs.LAST_MODIFIED),
hdrs.ETAG: head.get(hdrs.ETAG)
}
await self._save_cache_info(cache_entry_dir, cache_info)
# Add to a cache index.
self._cache[url] = cache_entry_dir
except:
# Remove cache entry dir in case of error or cancellation.
await self._delete_cache_entry(cache_entry_dir)
raise
caching = ensure_future(cache(), loop=self._loop)
self._downloadings[url] = pd, downloading, caching
uploading = ensure_future(pd.read(callback), loop=self._loop)
self._uploadings.add((url, uploading))
try:
await uploading
except ReadError:
self._debug('Read error.')
pass
except Exception as exc:
self._debug('Uploading failed with exception: {!r}.'.format(exc))
raise
finally:
self._uploadings.remove((url, uploading))
if not self._uploadings:
await pd.clear()
async def _save_cache_info(self, cache_entry_dir: str, cache_info: Dict[str, str]):
def do():
cache_info_file_path = get_cache_info_file_path(cache_entry_dir)
with open(cache_info_file_path, 'w') as f:
json.dump(cache_info, f, indent=4)
await self._run_nb(do)
async def _load_cache_info(self, cache_entry_dir: str) -> Dict[str, str]:
def do():
cache_info_file_path = get_cache_info_file_path(cache_entry_dir)
if not os.path.isfile(cache_info_file_path):
raise CacheError('Cache info file problem.')
try:
with open(cache_info_file_path) as f:
return json.load(f) # type: Dict
except Exception as exc:
raise CacheError(exc)
return await self._run_nb(do)
async def _update_last_access(self, cache_entry_dir: str):
def do():
cache_last_access_file_path = get_cache_last_access_file_path(cache_entry_dir)
with self._last_access_file_lock, open(cache_last_access_file_path, 'w') as f:
f.write(str(time()))
await self._run_nb(do)
async def _init_cache(self):
for cache_entry_dir in os.listdir(self._cache_dir):
cache_entry_dir = os.path.join(self._cache_dir, cache_entry_dir)
if not os.path.isdir(cache_entry_dir):
continue
try:
cache_bin_file_path = get_cache_bin_file_path(cache_entry_dir)
if not os.path.isfile(cache_bin_file_path):
raise CacheError('Cache bin file problem.')
cache_info = await self._load_cache_info(cache_entry_dir)
url = cache_info.get('URL')
if not url:
raise CacheError('Bad cache info file.')
self._cache[url] = cache_entry_dir
except Exception as exc:
self._debug('Cannot load cache from dir: {!r}. Error: {!r}.'.format(cache_entry_dir, exc))
continue
async with self._state_condition:
self._state = READY
self._state_condition.notify_all()
async def _delete_cache_entry(self, cache_entry_dir):
try:
await self._run_nb(lambda: shutil.rmtree(cache_entry_dir))
except:
pass
async def _when_state(self, state):
async with self._state_condition:
while self._state is not state:
await self._state_condition.wait()
async def _run_nb(self, func):
return await self._loop.run_in_executor(None, lambda: func())
def _debug(self, msg, *args, **kwargs):
msg = "{!r} {!s}".format(self, msg)
self._server_logger.debug(msg, *args, **kwargs)
def __repr__(self, *args, **kwargs):
return '<CachingDownloader (D:{!s} U:{!s})>'.format(len(self._downloadings), len(self._uploadings))
class ParallelHttpRequestHandler(aiohttp.StreamProtocol):
_request_count = 0
_request_handler = None
_reading_request = False
_keep_alive = False # keep transport open
_keep_alive_handle = None # keep alive timer handle
_timeout_handle = None # slow request timer handle
_request_prefix = aiohttp.HttpPrefixParser() # http method parser
_request_parser = aiohttp.HttpRequestParser() # default request parser
def __init__(
self, manager, *, loop: AbstractEventLoop = None,
keep_alive=75,
keep_alive_on=True,
timeout=0,
server_logger=server_logger,
access_logger=access_logger,
access_log_format=DEFAULT_PARACCESS_LOG_FORMAT,
debug=False,
log=None,
parallels: int = DEFAULT_PARALLELS,
part_size: int = DEFAULT_PART_SIZE,
chunk_size: int = DEFAULT_CHUNK_SIZE,
buffer_dir: str = DEFAULT_BUFFER_DIR,
cached_downloader: CachingDownloader,
**kwargs):
super().__init__(
loop=loop,
disconnect_error=aiohttp.errors.ClientDisconnectedError, **kwargs)
self._keep_alive_on = keep_alive_on
self._keep_alive_period = keep_alive # number of seconds to keep alive
self._timeout = timeout # slow request timeout
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._server_logger = log or server_logger
self._debug = debug
self._access_logger = aiohttp.helpers.AccessLogger(access_logger, access_log_format) if access_logger else None
self._manager = manager
self._loop = loop
self._parallels = parallels
self._part_size = part_size
self._chunk_size = chunk_size
self._buffer_dir = buffer_dir
self._cached_downloader = cached_downloader
def check_request(self, message: RawRequestMessage):
if message.method == hdrs.METH_CONNECT:
self.handle_error(status=405, message=message)
raise UnsupportedError("Method '%s' is not supported." % message.method)
@property
def keep_alive_timeout(self):
return self._keep_alive_period
    def closing(self, timeout=15.0):
        """Worker process is about to exit; we need to clean up everything and
stop accepting requests. It is especially important for keep-alive
connections."""
self._keep_alive = False
self._keep_alive_on = False
self._keep_alive_period = None
if not self._reading_request and self.transport is not None:
if self._request_handler:
self._request_handler.cancel()
self._request_handler = None
self.transport.close()
self.transport = None
elif self.transport is not None and timeout:
if self._timeout_handle is not None:
self._timeout_handle.cancel()
# use slow request timeout for closing
# connection_lost cleans timeout handler
now = self._loop.time()
self._timeout_handle = self._loop.call_at(
ceil(now + timeout), self.cancel_slow_request)
def connection_made(self, transport):
super().connection_made(transport)
self._request_handler = ensure_future(self.start(), loop=self._loop)
# start slow request timer
if self._timeout:
now = self._loop.time()
self._timeout_handle = self._loop.call_at(
ceil(now + self._timeout), self.cancel_slow_request)
if self._keep_alive_on:
tcp_keepalive(self, transport)
self._manager.connection_made(self, transport)
def connection_lost(self, exc):
self._manager.connection_lost(self, exc)
super().connection_lost(exc)
if self._request_handler is not None:
self._request_handler.cancel()
self._request_handler = None
if self._keep_alive_handle is not None:
self._keep_alive_handle.cancel()
self._keep_alive_handle = None
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = None
def data_received(self, data):
super().data_received(data)
# reading request
if not self._reading_request:
self._reading_request = True
# stop keep-alive timer
if self._keep_alive_handle is not None:
self._keep_alive_handle.cancel()
self._keep_alive_handle = None
def keep_alive(self, val):
"""Set keep-alive connection mode.
:param bool val: new state.
"""
self._keep_alive = val
def log_access(self, message, environ, response, time):
if self._access_logger:
self._access_logger.log(message, environ, response, self.transport, time)
def log_debug(self, *args, **kw):
if self._debug:
self._server_logger.debug(*args, **kw)
def log_exception(self, *args, **kw):
self._server_logger.exception(*args, **kw)
def cancel_slow_request(self):
if self._request_handler is not None:
self._request_handler.cancel()
self._request_handler = None
if self.transport is not None:
self.transport.close()
self.log_debug('Close slow request.')
@asyncio.coroutine
def start(self):
"""Start processing of incoming requests.
It reads request line, request headers and request payload, then
calls handle_request() method. Subclass has to override
handle_request(). start() handles various exceptions in request
        or response handling. The connection is always closed unless
        keep_alive(True) is specified.
"""
reader = self.reader
while True:
message = None
self._keep_alive = False
self._request_count += 1
self._reading_request = False
payload = None
try:
# read http request method
prefix = reader.set_parser(self._request_prefix)
yield from prefix.read()
# start reading request
self._reading_request = True
# start slow request timer
if self._timeout and self._timeout_handle is None:
now = self._loop.time()
self._timeout_handle = self._loop.call_at(
ceil(now + self._timeout), self.cancel_slow_request)
# read request headers
httpstream = reader.set_parser(self._request_parser)
message = yield from httpstream.read()
# cancel slow request timer
if self._timeout_handle is not None:
self._timeout_handle.cancel()
self._timeout_handle = None
# request may not have payload
if (message.headers.get(hdrs.CONTENT_LENGTH, 0) or
hdrs.SEC_WEBSOCKET_KEY1 in message.headers or
'chunked' in message.headers.get(
hdrs.TRANSFER_ENCODING, '')):
payload = aiohttp.streams.FlowControlStreamReader(
reader, loop=self._loop)
reader.set_parser(
aiohttp.HttpPayloadParser(message), payload)
else:
payload = EMPTY_PAYLOAD
yield from self.handle_request(message, payload)
except asyncio.CancelledError:
return
except aiohttp.errors.ClientDisconnectedError:
self.log_debug(
'Ignored premature client disconnection #1.')
return
except aiohttp.errors.HttpProcessingError as exc:
if self.transport is not None:
yield from self.handle_error(exc.code, message,
None, exc, exc.headers,
exc.message)
except aiohttp.errors.LineLimitExceededParserError as exc:
yield from self.handle_error(400, message, None, exc)
except Exception as exc:
yield from self.handle_error(500, message, None, exc)
finally:
if self.transport is None:
self.log_debug(
'Ignored premature client disconnection #2.')
return
if payload and not payload.is_eof():
self.log_debug('Uncompleted request.')
self._request_handler = None
self.transport.close()
return
else:
reader.unset_parser()
if self._request_handler:
if self._keep_alive and self._keep_alive_period:
self.log_debug(
'Start keep-alive timer for %s sec.',
self._keep_alive_period)
now = self._loop.time()
self._keep_alive_handle = self._loop.call_at(
ceil(now + self._keep_alive_period),
self.transport.close)
elif self._keep_alive and self._keep_alive_on:
# do nothing, rely on kernel or upstream server
pass
else:
self.log_debug('Close client connection.')
self._request_handler = None
self.transport.close()
return
else:
# connection is closed
return
async def handle_request(self, message: RawRequestMessage, payload):
now = self._loop.time()
self.check_request(message)
self.keep_alive(True)
if message.method == hdrs.METH_CONNECT:
response = await self.process_connect(message, payload)
else:
# Try to process parallel.
response = await self.process_parallel(message, payload)
# Otherwise process normally.
if not response:
response = await self.process_normally(message, payload)
self.log_access(message, None, response, self._loop.time() - now)
async def process_connect(self, message: RawRequestMessage, payload) -> aiohttp.Response:
# TODO: implement.
pass
async def process_normally(self, message: RawRequestMessage, payload) -> aiohttp.Response:
"""Process request normally."""
req_data = payload if not isinstance(payload, EmptyStreamReader) else None
# Request from a host.
try:
async with aiohttp.ClientSession(headers=message.headers, loop=self._loop) as session:
async with session.request(message.method, message.path,
data=req_data,
allow_redirects=False) as host_resp: # type: aiohttp.ClientResponse
client_res = aiohttp.Response(
self.writer, host_resp.status, http_version=message.version)
# Process host response headers.
for name, value in host_resp.headers.items():
if name == hdrs.CONTENT_ENCODING:
continue
if name == hdrs.CONTENT_LENGTH:
continue
if name == hdrs.TRANSFER_ENCODING:
if value.lower() == 'chunked':
client_res.enable_chunked_encoding()
client_res.add_header(name, value)
# Send headers to the client.
client_res.send_headers()
# Send a payload.
while True:
chunk = await host_resp.content.read(self._chunk_size)
if not chunk:
break
client_res.write(chunk)
if client_res.chunked or client_res.autochunked():
await client_res.write_eof()
return client_res
except aiohttp.ClientResponseError:
self.log_debug("CANCELLED {!s} {!r}.".format(message.method, message.path))
raise
async def process_parallel(self, message: RawRequestMessage, payload) -> aiohttp.Response:
"""Try process a request parallel. Returns True in case of processed parallel, otherwise False."""
# Checking the opportunity of parallel downloading.
if message.method != hdrs.METH_GET or not need_file_to_parallel(message.path):
return None
head = await self.get_file_head(message.path)
if head is None:
return None
accept_ranges = head.get(hdrs.ACCEPT_RANGES)
if not accept_ranges or accept_ranges.lower() != 'bytes':
return None
content_length = head.get(hdrs.CONTENT_LENGTH)
if content_length is None:
return None
content_length = int(content_length)
if content_length <= 0 or content_length < DEFAULT_PART_SIZE:
return None
# All checks pass, start a parallel downloading.
self.log_debug("PARALLEL GET {!r} [{!s} bytes].".format(message.path, content_length))
# Get additional file info.
content_type = head.get(hdrs.CONTENT_TYPE)
# Prepare a response to a client.
client_res = aiohttp.Response(self.writer, 200, http_version=message.version)
client_res.add_header(hdrs.CONTENT_LENGTH, str(content_length))
if content_type:
client_res.add_header(hdrs.CONTENT_TYPE, content_type)
client_res.add_header(PARALLELS_HEADER, str(self._parallels))
client_res.send_headers()
try:
await self._cached_downloader.download(message.path, head, lambda chunk: client_res.write(chunk))
            await client_res.write_eof()
except Exception as exc:
self.log_debug("CANCELLED PARALLEL GET {!r}. Caused by exception: {!r}.".format(message.path, exc))
raise
return client_res
def handle_error(self, status=500, message=None, payload=None, exc=None, headers=None, reason=None):
"""Handle errors.
        Returns an HTTP response with the given status code, logs additional
        information and always closes the current connection."""
now = self._loop.time()
try:
if self._request_handler is None:
# client has been disconnected during writing.
return ()
if status == 500:
self.log_exception("Error handling request")
try:
if reason is None or reason == '':
reason, msg = RESPONSES[status]
else:
msg = reason
except KeyError:
status = 500
reason, msg = '???', ''
if self._debug and exc is not None:
try:
tb = traceback.format_exc()
tb = html_escape(tb)
msg += '<br><h2>Traceback:</h2>\n<pre>{}</pre>'.format(tb)
except:
pass
html = DEFAULT_ERROR_MESSAGE.format(
status=status, reason=reason, message=msg).encode('utf-8')
response = aiohttp.Response(self.writer, status, close=True)
response.add_header(hdrs.CONTENT_TYPE, 'text/html; charset=utf-8')
response.add_header(hdrs.CONTENT_LENGTH, str(len(html)))
if headers is not None:
for name, value in headers:
response.add_header(name, value)
response.send_headers()
response.write(html)
# disable CORK, enable NODELAY if needed
self.writer.set_tcp_nodelay(True)
drain = response.write_eof()
self.log_access(message, None, response, self._loop.time() - now)
return drain
finally:
self.keep_alive(False)
async def get_file_head(self, url: str) -> Optional[CIMultiDictProxy]:
"""Make a HEAD request to get a 'content-length' and 'accept-ranges' headers."""
self.log_debug('Getting a HEAD for url: {!s}.'.format(url))
try:
async with aiohttp.ClientSession(loop=self._loop) as session:
async with session.request(hdrs.METH_HEAD, url) as res: # type: aiohttp.ClientResponse
return res.headers
except Exception as exc:
self.log_debug("Could not get a HEAD for the {!r}. Error: {!r}.".format(url, exc))
return None
def get_client_address(self):
address, port = self.transport.get_extra_info('peername')
return '%s:%s' % (address, port)
class ParallelHttpRequestHandlerFactory:
def __init__(self, *,
handler_class=ParallelHttpRequestHandler,
loop=None,
server_logger=server_logger,
access_logger=access_logger,
**kwargs):
self._handler_class = handler_class
self._loop = loop
self._server_logger = server_logger
self._access_logger = access_logger
self._connections = {}
self._kwargs = kwargs
self.num_connections = 0
@property
def connections(self):
return list(self._connections.keys())
def connection_made(self, handler, transport):
self._connections[handler] = transport
def connection_lost(self, handler, exc=None):
if handler in self._connections:
del self._connections[handler]
async def _connections_cleanup(self):
        delay = 0.05  # start with a short poll and back off exponentially
        while self._connections:
            await asyncio.sleep(delay, loop=self._loop)
            if delay < 5:
                delay *= 2
async def finish_connections(self, timeout=None):
# try to close connections in 90% of graceful timeout
timeout90 = None
if timeout:
timeout90 = timeout / 100 * 90
for handler in self._connections.keys():
handler.closing(timeout=timeout90)
if timeout:
try:
await wait_for(
self._connections_cleanup(), timeout, loop=self._loop)
except TimeoutError:
self._server_logger.warning(
"Not all connections are closed (pending: %d)",
len(self._connections))
for transport in self._connections.values():
transport.close()
self._connections.clear()
def __call__(self):
self.num_connections += 1
try:
return self._handler_class(
manager=self,
loop=self._loop,
server_logger=server_logger,
access_logger=access_logger,
**self._kwargs)
        except Exception:
            server_logger.exception(
                'Cannot create request handler: {!r}'.format(self._handler_class))
class ParaproxioError(Exception):
pass
class WrongResponseError(ParaproxioError):
pass
class UnsupportedError(ParaproxioError):
pass
class DownloadError(ParaproxioError):
pass
class ReadError(ParaproxioError):
pass
class CacheError(ParaproxioError):
pass
def setup_dirs(*dirs):
for d in dirs:
os.makedirs(d, exist_ok=True)
def clean_dirs(*dirs):
for d in dirs:
shutil.rmtree(d, ignore_errors=True)
def get_args(args):
parser = argparse.ArgumentParser(prog="paraproxio",
description="An HTTP proxy with a parallel downloading of big files.")
parser.add_argument("-H", "--host", type=str, default=DEFAULT_HOST, help="host address")
parser.add_argument("-P", "--port", type=int, default=DEFAULT_PORT, help="port")
parser.add_argument("--parallels", type=int, default=DEFAULT_PARALLELS, help="parallel downloads of a big file")
parser.add_argument("--part-size", type=int, default=DEFAULT_PART_SIZE, help="part size of a parallel download")
parser.add_argument("--max-workers", type=int, default=DEFAULT_MAX_WORKERS, help="max workers of executor")
parser.add_argument("--chunk-size", type=int, default=DEFAULT_CHUNK_SIZE, help="chunk size")
parser.add_argument("--buffer-dir", type=str, default=DEFAULT_BUFFER_DIR, help="buffer dir")
parser.add_argument("--cache-dir", type=str, default=DEFAULT_CACHE_DIR, help="cache dir")
parser.add_argument("--logs-dir", type=str, default=DEFAULT_LOGS_DIR, help="logs dir")
parser.add_argument("--cache-max-age", type=int, default=DEFAULT_CACHE_MAX_AGE, help="max age of a cache entry")
parser.add_argument("--debug", default=False, action="store_true", help="enable debug information in the stdout")
parser.add_argument("--clean-all", default=False, action="store_true", help="clean all temp files before start")
parser.add_argument("--version", action="version", version=PARAPROXIO_VERSION)
pargs = parser.parse_args(args)
pargs.buffer_dir = os.path.abspath(pargs.buffer_dir)
pargs.cache_dir = os.path.abspath(pargs.cache_dir)
pargs.logs_dir = os.path.abspath(pargs.logs_dir)
return pargs
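# Example invocation (illustrative; the script name and values below are
# assumptions, only the flags come from the parser above):
#   python paraproxio.py --host 0.0.0.0 --port 8880 --parallels 8 --debug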
class PeriodicTask(object):
_handler = None
def __init__(self, action: Callable[[], None], interval: int, *, loop):
self._action = action
self._interval = interval
self._loop = loop
def _run(self):
self._action()
self.start()
def start(self):
self._handler = self._loop.call_later(self._interval, self._run)
def stop(self):
self._handler.cancel()
class Paraproxio:
def __init__(self, args=None, loop=None, enable_logging=True):
self._args = get_args(args)
self._loop = loop
self._enable_logging = enable_logging
clean_dirs(self._args.buffer_dir)
if self._args.clean_all:
clean_dirs(self._args.logs_dir, self._args.cache_dir)
setup_dirs(self._args.buffer_dir, self._args.logs_dir, self._args.cache_dir)
# Create an event loop.
self._autoclose_loop = False
if self._loop is None:
self._autoclose_loop = True
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
# Create custom executor.
executor = concurrent.futures.ThreadPoolExecutor(self._args.max_workers)
self._loop.set_default_executor(executor)
if self._enable_logging:
self._setup_logging(self._args.logs_dir,
DEFAULT_SERVER_LOG_FILENAME,
DEFAULT_ACCESS_LOG_FILENAME,
debug=self._args.debug)
self._cached_downloader = CachingDownloader(cache_dir=self._args.cache_dir,
parallels=self._args.parallels,
part_size=self._args.part_size,
chunk_size=self._args.chunk_size,
loop=self._loop)
def run_forever(self):
cache_cleaner = PeriodicTask(self._clean_old_cache_entries, DEFAULT_CACHE_CLEANUP_INTERVAL, loop=self._loop)
cache_cleaner.start()
handler_factory = ParallelHttpRequestHandlerFactory(loop=self._loop, debug=self._args.debug,
parallels=self._args.parallels,
part_size=self._args.part_size,
chunk_size=self._args.chunk_size,
buffer_dir=self._args.buffer_dir,
cached_downloader=self._cached_downloader,
keep_alive=75)
srv = self._loop.run_until_complete(self._loop.create_server(handler_factory, self._args.host, self._args.port))
print('Paraproxio serving on', srv.sockets[0].getsockname())
if self._args.debug:
print('Debug mode.')
try:
self._loop.run_forever()
except KeyboardInterrupt:
pass
finally:
cache_cleaner.stop()
srv.close()
self._loop.run_until_complete(srv.wait_closed())
self._loop.run_until_complete(handler_factory.finish_connections(timeout=15))
if self._autoclose_loop:
self._loop.close()
if self._enable_logging:
self._release_logging()
def _clean_old_cache_entries(self):
cache_dir = self._args.cache_dir
max_age = self._args.cache_max_age
for cache_entry_dir in os.listdir(cache_dir):
cache_entry_dir = os.path.join(cache_dir, cache_entry_dir)
if not os.path.isdir(cache_entry_dir):
continue
try:
cache_last_access_file_path = get_cache_last_access_file_path(cache_entry_dir)
if not os.path.isfile(cache_last_access_file_path):
raise CacheError('Cache bin file problem.')
with open(cache_last_access_file_path) as f:
last_access = float(f.read())
if time() - last_access > max_age:
shutil.rmtree(cache_entry_dir)
except:
continue
def _setup_logging(
self,
logs_dir: str,
server_log_filename: str,
access_log_filename: str,
*,
debug=False):
# Set levels.
level = logging.DEBUG if debug else logging.INFO
server_logger.setLevel(level)
access_logger.setLevel(level)
# stderr handler.
self._stderr_handler = logging.StreamHandler(sys.stderr)
self._stderr_handler.setLevel(level)
server_logger.addHandler(self._stderr_handler)
access_logger.addHandler(self._stderr_handler)
# Server log file handler.
self._sl_handler = logging.FileHandler(os.path.join(logs_dir, server_log_filename))
self._sl_handler.setLevel(level)
server_logger.addHandler(self._sl_handler)
# Access log file handler.
self._al_handler = logging.FileHandler(os.path.join(logs_dir, access_log_filename))
self._al_handler.setLevel(level)
access_logger.addHandler(self._al_handler)
def _release_logging(self):
server_logger.removeHandler(self._stderr_handler)
access_logger.removeHandler(self._stderr_handler)
server_logger.removeHandler(self._sl_handler)
access_logger.removeHandler(self._al_handler)
if __name__ == '__main__':
paraproxio = Paraproxio()
paraproxio.run_forever()
|
SnakeJenny/TensorFlow
|
refs/heads/master
|
tensorflow/python/kernel_tests/bcast_ops_test.py
|
102
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.gen_array_ops import _broadcast_args
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class BcastOpsTest(test.TestCase):
def _GetBroadcastShape(self, xs, ys):
with self.test_session() as sess:
return sess.run(_broadcast_args(xs, ys))
def _GetGradientArgs(self, xs, ys):
with self.test_session() as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
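  # For orientation: _broadcast_args returns the broadcasted output shape,
  # while _broadcast_gradient_args returns, for each input, the output axes
  # that must be summed to reduce a gradient back to that input's shape --
  # the expectations in the tests below illustrate both behaviours.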
def testBasic(self):
r = self._GetBroadcastShape([2, 3, 5], [1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 1, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 1, 5])
self.assertAllEqual(r, [2, 3, 5])
def testBasicGradient(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
r0, r1 = self._GetGradientArgs([1], [2, 3, 5])
self.assertAllEqual(r0, [0, 1, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([5], [2, 3, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0])
r0, r1 = self._GetGradientArgs([3, 5], [2, 3, 5])
self.assertAllEqual(r0, [0])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 3, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 1, 5], [3, 1])
self.assertAllEqual(r0, [1])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 1, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [1])
def testZeroDims(self):
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
def testZeroDimsGradient(self):
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 3])
r0, r1 = self._GetGradientArgs([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1, 3])
self.assertAllEqual(r1, [])
if __name__ == "__main__":
test.main()
|
GhostThrone/django
|
refs/heads/master
|
django/core/cache/backends/filebased.py
|
428
|
"File-based cache backend"
import errno
import glob
import hashlib
import io
import os
import random
import tempfile
import time
import zlib
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.files.move import file_move_safe
from django.utils.encoding import force_bytes
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
class FileBasedCache(BaseCache):
cache_suffix = '.djcache'
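    # On-disk layout (see set() and _is_expired() below): each cache file
    # stores a pickled expiry timestamp followed by the zlib-compressed
    # pickled value.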
def __init__(self, dir, params):
super(FileBasedCache, self).__init__(params)
self._dir = os.path.abspath(dir)
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version):
return False
self.set(key, value, timeout, version)
return True
def get(self, key, default=None, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
try:
with io.open(fname, 'rb') as f:
if not self._is_expired(f):
return pickle.loads(zlib.decompress(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
pass # Cache file was removed after the exists check
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
self._createdir() # Cache dir can be deleted at any time.
fname = self._key_to_file(key, version)
self._cull() # make some room if necessary
fd, tmp_path = tempfile.mkstemp(dir=self._dir)
renamed = False
try:
with io.open(fd, 'wb') as f:
expiry = self.get_backend_timeout(timeout)
f.write(pickle.dumps(expiry, -1))
f.write(zlib.compress(pickle.dumps(value), -1))
file_move_safe(tmp_path, fname, allow_overwrite=True)
renamed = True
finally:
if not renamed:
os.remove(tmp_path)
def delete(self, key, version=None):
self._delete(self._key_to_file(key, version))
def _delete(self, fname):
if not fname.startswith(self._dir) or not os.path.exists(fname):
return
try:
os.remove(fname)
except OSError as e:
# ENOENT can happen if the cache file is removed (by another
# process) after the os.path.exists check.
if e.errno != errno.ENOENT:
raise
def has_key(self, key, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
with io.open(fname, 'rb') as f:
return not self._is_expired(f)
return False
def _cull(self):
"""
Removes random cache entries if max_entries is reached at a ratio
of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means
that the entire cache will be purged.
"""
filelist = self._list_cache_files()
num_entries = len(filelist)
if num_entries < self._max_entries:
return # return early if no culling is required
if self._cull_frequency == 0:
return self.clear() # Clear the cache when CULL_FREQUENCY = 0
# Delete a random selection of entries
filelist = random.sample(filelist,
int(num_entries / self._cull_frequency))
for fname in filelist:
self._delete(fname)
def _createdir(self):
if not os.path.exists(self._dir):
try:
os.makedirs(self._dir, 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise EnvironmentError(
"Cache directory '%s' does not exist "
"and could not be created'" % self._dir)
def _key_to_file(self, key, version=None):
"""
Convert a key into a cache file path. Basically this is the
root cache path joined with the md5sum of the key and a suffix.
"""
key = self.make_key(key, version=version)
self.validate_key(key)
return os.path.join(self._dir, ''.join(
[hashlib.md5(force_bytes(key)).hexdigest(), self.cache_suffix]))
def clear(self):
"""
Remove all the cache files.
"""
if not os.path.exists(self._dir):
return
for fname in self._list_cache_files():
self._delete(fname)
def _is_expired(self, f):
"""
Takes an open cache file and determines if it has expired,
        deletes the file if it has passed its expiry time.
"""
exp = pickle.load(f)
if exp is not None and exp < time.time():
f.close() # On Windows a file has to be closed before deleting
self._delete(f.name)
return True
return False
def _list_cache_files(self):
"""
Get a list of paths to all the cache files. These are all the files
        in the root cache dir that end with the cache_suffix.
"""
if not os.path.exists(self._dir):
return []
filelist = [os.path.join(self._dir, fname) for fname
in glob.glob1(self._dir, '*%s' % self.cache_suffix)]
return filelist
|
williammc/gtest
|
refs/heads/master
|
test/gtest_list_tests_unittest.py
|
1898
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
webgeodatavore/django
|
refs/heads/master
|
tests/requests/tests.py
|
87
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import time
from datetime import datetime, timedelta
from io import BytesIO
from itertools import chain
from django.core.exceptions import SuspiciousOperation
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.http import (
HttpRequest, HttpResponse, RawPostDataException, UnreadablePostError,
parse_cookie,
)
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.client import FakePayload
from django.test.utils import str_prefix
from django.utils import six
from django.utils.encoding import force_str
from django.utils.http import cookie_date, urlencode
from django.utils.six.moves import http_cookies
from django.utils.six.moves.urllib.parse import urlencode as original_urlencode
from django.utils.timezone import utc
class RequestsTests(SimpleTestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(list(request.GET.keys()), [])
self.assertEqual(list(request.POST.keys()), [])
self.assertEqual(list(request.COOKIES.keys()), [])
self.assertEqual(list(request.META.keys()), [])
# .GET and .POST should be QueryDicts
self.assertEqual(request.GET.urlencode(), '')
self.assertEqual(request.POST.urlencode(), '')
# and FILES should be MultiValueDict
self.assertEqual(request.FILES.getlist('foo'), [])
def test_httprequest_full_path(self):
request = HttpRequest()
request.path = request.path_info = '/;some/?awful/=path/foo:bar/'
request.META['QUERY_STRING'] = ';some=query&+query=string'
expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string'
self.assertEqual(request.get_full_path(), expected)
def test_httprequest_full_path_with_query_string_and_fragment(self):
request = HttpRequest()
request.path = request.path_info = '/foo#bar'
request.META['QUERY_STRING'] = 'baz#quux'
self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux')
def test_httprequest_repr(self):
request = HttpRequest()
request.path = '/somepath/'
request.method = 'GET'
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), str_prefix("<HttpRequest: GET '/somepath/'>"))
def test_httprequest_repr_invalid_method_and_path(self):
request = HttpRequest()
self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
request = HttpRequest()
request.method = "GET"
self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
request = HttpRequest()
request.path = ""
self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': BytesIO(b'')})
self.assertEqual(list(request.GET.keys()), [])
self.assertEqual(list(request.POST.keys()), [])
self.assertEqual(list(request.COOKIES.keys()), [])
self.assertEqual(set(request.META.keys()), {'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'})
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_wsgirequest_with_script_name(self):
"""
Ensure that the request's path is correctly assembled, regardless of
whether or not the SCRIPT_NAME has a trailing slash.
Refs #20169.
"""
# With trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
# Without trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
def test_wsgirequest_with_force_script_name(self):
"""
Ensure that the FORCE_SCRIPT_NAME setting takes precedence over the
request's SCRIPT_NAME environment parameter.
Refs #20169.
"""
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
"""
Ensure that the request's path is correctly assembled, regardless of
whether or not the FORCE_SCRIPT_NAME setting has a trailing slash.
Refs #20169.
"""
# With trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
# Without trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_repr(self):
request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/'>"))
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/somepath/'>"))
def test_wsgirequest_path_info(self):
def wsgi_str(path_info):
path_info = path_info.encode('utf-8') # Actual URL sent by the browser (bytestring)
if six.PY3:
path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string)
return path_info
# Regression for #19468
request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, "/سلام/")
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid@key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; The
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_aware_expiration(self):
"Cookie accepts an aware datetime as expiration time"
response = HttpResponse()
expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertIn(
datetime_cookie['expires'],
# Slight time dependency; refs #23450
('Sat, 01-Jan-2028 04:05:06 GMT', 'Sat, 01-Jan-2028 04:05:07 GMT')
)
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time() + 10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertIn('; %s' % http_cookies.Morsel._reserved['httponly'], str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_unicode_cookie(self):
"Verify HttpResponse.set_cookie() works with unicode data."
response = HttpResponse()
cookie_value = '清風'
response.set_cookie('test', cookie_value)
self.assertEqual(force_str(cookie_value), response.cookies['test'].value)
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(), b'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), b'')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(5), b'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read sequentially from a stream
stream = LimitedStream(BytesIO(b'12345678'), 8)
self.assertEqual(stream.read(5), b'12345')
self.assertEqual(stream.read(5), b'678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read lines from a stream
stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), b'1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), b'56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), b'78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), b'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), b'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), b'\n')
# Read everything else.
self.assertEqual(stream.readline(), b'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), b'1234\n')
self.assertEqual(stream.readline(3), b'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), b'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), b'')
# Same test, but with read, not readline.
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.read(6), b'1234\na')
self.assertEqual(stream.read(2), b'bc')
self.assertEqual(stream.read(2), b'd')
self.assertEqual(stream.read(2), b'')
self.assertEqual(stream.read(), b'')
def test_stream(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(), b'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
self.assertEqual(request.body, b'name=value')
self.assertEqual(request.read(), b'name=value')
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(2), b'na')
self.assertRaises(RawPostDataException, lambda: request.body)
self.assertEqual(request.POST, {})
def test_non_ascii_POST(self):
payload = FakePayload(urlencode({'key': 'España'}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_alternate_charset_POST(self):
"""
Test a POST with non-utf-8 payload encoding.
"""
payload = FakePayload(original_urlencode({'key': 'España'.encode('latin-1')}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_body_after_POST_multipart_form_data(self):
"""
Reading body after parsing multipart/form-data is not allowed
"""
# Because multipart is used for large amounts of data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
self.assertRaises(RawPostDataException, lambda: request.body)
def test_body_after_POST_multipart_related(self):
"""
Reading body after parsing multipart that isn't form-data is allowed
"""
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
b''])
payload = FakePayload(payload_data)
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': payload})
self.assertEqual(request.POST, {})
def test_POST_binary_only(self):
payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
environ = {'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/octet-stream',
'CONTENT_LENGTH': len(payload),
'wsgi.input': BytesIO(payload)}
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
# Same test without specifying content-type
environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)})
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
def test_read_by_lines(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(list(request), [b'name=value'])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.read(1), b'n')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), b'--boundary\r\nC')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.body
def test_FILES_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
FILES, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'x'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.FILES
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_raw_uri(self):
factory = RequestFactory(HTTP_HOST='evil.com')
request = factory.get('////absolute-uri')
self.assertEqual(request.get_raw_uri(), 'http://evil.com//absolute-uri')
request = factory.get('/?foo=bar')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/?foo=bar')
request = factory.get('/path/with:colons')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/path/with:colons')
class HostValidationTests(SimpleTestCase):
poisoned_hosts = [
'example.com@evil.tld',
'example.com:dr.frankenstein@evil.tld',
'example.com:dr.frankenstein@evil.tld:80',
'example.com:80/badpath',
'example.com: recovermypassword.com',
]
@override_settings(
USE_X_FORWARDED_HOST=False,
ALLOWED_HOSTS=[
'forward.com', 'example.com', 'internal.com', '12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com',
'.multitenant.com', 'INSENSITIVE.com',
])
def test_http_get_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), 'example.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
            'xn--4ca9at.com',  # Punycode for öäü.com
'anything.multitenant.com',
'multitenant.com',
'insensitive.com',
'example.com.',
'example.com.:80',
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
# Poisoned host headers are rejected as suspicious
for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']):
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*'])
def test_http_get_host_with_x_forwarded_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is obeyed.
self.assertEqual(request.get_host(), 'forward.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
# Poisoned host headers are rejected as suspicious
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
            'xn--4ca9at.com',  # Punycode for öäü.com
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
for host in self.poisoned_hosts:
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_PORT=False)
def test_get_port(self):
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_PORT': '80',
}
# Shouldn't use the X-Forwarded-Port header
self.assertEqual(request.get_port(), '8080')
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
}
self.assertEqual(request.get_port(), '8080')
@override_settings(USE_X_FORWARDED_PORT=True)
def test_get_port_with_x_forwarded_port(self):
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_PORT': '80',
}
# Should use the X-Forwarded-Port header
self.assertEqual(request.get_port(), '80')
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
}
self.assertEqual(request.get_port(), '8080')
@override_settings(DEBUG=True, ALLOWED_HOSTS=[])
def test_host_validation_disabled_in_debug_mode(self):
"""If ALLOWED_HOSTS is empty and DEBUG is True, all hosts pass."""
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
}
self.assertEqual(request.get_host(), 'example.com')
# Invalid hostnames would normally raise a SuspiciousOperation,
# but we have DEBUG=True, so this check is disabled.
request = HttpRequest()
request.META = {
'HTTP_HOST': "invalid_hostname.com",
}
self.assertEqual(request.get_host(), "invalid_hostname.com")
@override_settings(ALLOWED_HOSTS=[])
def test_get_host_suggestion_of_allowed_host(self):
"""get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS."""
msg_invalid_host = "Invalid HTTP_HOST header: %r."
msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035"
for host in [ # Valid-looking hosts
'example.com',
'12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]',
            'xn--4ca9at.com',  # Punycode for öäü.com
]:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertRaisesMessage(
SuspiciousOperation,
msg_suggestion % (host, host),
request.get_host
)
for domain, port in [ # Valid-looking hosts with a port number
('example.com', 80),
('12.34.56.78', 443),
('[2001:19f0:feee::dead:beef:cafe]', 8080),
]:
host = '%s:%s' % (domain, port)
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertRaisesMessage(
SuspiciousOperation,
msg_suggestion % (host, domain),
request.get_host
)
for host in self.poisoned_hosts:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertRaisesMessage(
SuspiciousOperation,
msg_invalid_host % host,
request.get_host
)
request = HttpRequest()
request.META = {'HTTP_HOST': "invalid_hostname.com"}
self.assertRaisesMessage(
SuspiciousOperation,
msg_suggestion2 % "invalid_hostname.com",
request.get_host
)
class BuildAbsoluteURITestCase(SimpleTestCase):
"""
Regression tests for ticket #18314.
"""
def setUp(self):
self.factory = RequestFactory()
def test_build_absolute_uri_no_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when the ``location`` argument is not provided, and ``request.path``
begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(),
'http://testserver//absolute-uri'
)
def test_build_absolute_uri_absolute_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when an absolute URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='http://example.com/?foo=bar'),
'http://example.com/?foo=bar'
)
def test_build_absolute_uri_schema_relative_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when a schema-relative URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='//example.com/?foo=bar'),
'http://example.com/?foo=bar'
)
def test_build_absolute_uri_relative_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when a relative URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='/foo/bar/'),
'http://testserver/foo/bar/'
)
|
wimnat/ansible-modules-extras
|
refs/heads/devel
|
windows/win_scheduled_task.py
|
12
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_scheduled_task
version_added: "2.0"
short_description: Manage scheduled tasks
description:
- Manage scheduled tasks
notes:
- This module requires Windows Server 2012 or later.
options:
name:
description:
- Name of the scheduled task
required: true
description:
description:
- The description for the scheduled task
required: false
enabled:
description:
- Enable/disable the task
choices:
- yes
- no
default: yes
state:
description:
- State that the task should become
required: true
choices:
- present
- absent
user:
description:
- User to run scheduled task as
required: false
execute:
description:
- Command the scheduled task should execute
required: false
argument:
description:
- Arguments to provide scheduled task action
required: false
frequency:
description:
- The frequency of the command, not idempotent
required: false
choices:
- daily
- weekly
time:
description:
- Time to execute scheduled task, not idempotent
required: false
days_of_week:
description:
- Days of the week to run a weekly task, not idempotent
required: false
path:
description:
- Task folder in which this task will be stored
default: '\'
'''
EXAMPLES = '''
# Create a scheduled task to open a command prompt
win_scheduled_task: name="TaskName" execute="cmd" frequency="daily" time="9am" description="open command prompt" path="example" enabled=yes state=present user=SYSTEM
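# Illustrative additions (not from the original module docs); the exact value format
# accepted by days_of_week may differ between module versions.
win_scheduled_task: name="WeeklyTask" execute="cmd" frequency="weekly" days_of_week="Monday" time="9am" state=present user=SYSTEM
# Remove a task by name
win_scheduled_task: name="TaskName" state=absent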
'''
|
oplinkoms/onos
|
refs/heads/master
|
tools/test/topos/uk.py
|
4
|
#!/usr/bin/env python
"""
"""
from mininet.topo import Topo
class UkTopo( Topo ):
"""Switches projected onto the UK map"""
def addSwitch( self, name, **opts ):
kwargs = { 'protocols' : 'OpenFlow13' }
kwargs.update( opts )
return super(UkTopo, self).addSwitch( name, **kwargs )
def __init__( self ):
"""Create a topology."""
# Initialize Topology
Topo.__init__( self )
# add nodes, switches first...
LONDON = self.addSwitch( 's1' )
BRISTL = self.addSwitch( 's2' )
BIRMHM = self.addSwitch( 's3' )
PLYMTH = self.addSwitch( 's4' )
DOVER = self.addSwitch( 's5' )
BRGHTN = self.addSwitch( 's6' )
LIVRPL = self.addSwitch( 's7' )
YORK = self.addSwitch( 's8' )
NWCSTL = self.addSwitch( 's9' )
NRWICH = self.addSwitch( 's10' )
EDBUGH = self.addSwitch( 's11' )
ABYSTW = self.addSwitch( 's12' )
# ... and now hosts
LONDON_host = self.addHost( 'h1' )
BRISTL_host = self.addHost( 'h2' )
BIRMHM_host = self.addHost( 'h3' )
PLYMTH_host = self.addHost( 'h4' )
DOVER_host = self.addHost( 'h5' )
BRGHTN_host = self.addHost( 'h6' )
LIVRPL_host = self.addHost( 'h7' )
YORK_host = self.addHost( 'h8' )
NWCSTL_host = self.addHost( 'h9' )
NRWICH_host = self.addHost( 'h10' )
EDBUGH_host = self.addHost( 'h11' )
ABYSTW_host = self.addHost( 'h12' )
# add edges between switch and corresponding host
self.addLink( LONDON, LONDON_host )
self.addLink( BRISTL, BRISTL_host )
self.addLink( BIRMHM, BIRMHM_host )
self.addLink( PLYMTH, PLYMTH_host )
self.addLink( DOVER, DOVER_host )
self.addLink( BRGHTN, BRGHTN_host )
self.addLink( LIVRPL, LIVRPL_host )
self.addLink( YORK, YORK_host )
self.addLink( NWCSTL, NWCSTL_host )
self.addLink( NRWICH, NRWICH_host )
self.addLink( EDBUGH, EDBUGH_host )
self.addLink( ABYSTW, ABYSTW_host )
# add edges between switches
self.addLink( BIRMHM, LIVRPL )
self.addLink( BIRMHM, YORK )
self.addLink( BRISTL, ABYSTW )
self.addLink( BRISTL, BIRMHM )
self.addLink( BRISTL, PLYMTH )
self.addLink( DOVER, BRGHTN )
self.addLink( DOVER, NRWICH )
self.addLink( LIVRPL, ABYSTW )
self.addLink( LIVRPL, EDBUGH )
self.addLink( LONDON, BIRMHM )
self.addLink( LONDON, BRGHTN )
self.addLink( LONDON, BRISTL )
self.addLink( LONDON, BRISTL )
self.addLink( LONDON, DOVER )
self.addLink( LONDON, NRWICH )
self.addLink( LONDON, PLYMTH )
self.addLink( LONDON, YORK )
self.addLink( LONDON, YORK )
self.addLink( NWCSTL, EDBUGH )
self.addLink( NWCSTL, LIVRPL )
self.addLink( NWCSTL, YORK )
self.addLink( YORK, LIVRPL )
self.addLink( YORK, NRWICH )
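# Register the topology under the name 'uk' so Mininet's --custom mechanism can load it,
# e.g. "sudo mn --custom uk.py --topo uk" (invocation shown for illustration only).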
topos = { 'uk': ( lambda: UkTopo() ) }
if __name__ == '__main__':
from onosnet import run
run( UkTopo() )
|
wwj718/edx-video
|
refs/heads/master
|
cms/djangoapps/contentstore/views/user.py
|
8
|
import json
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django_future.csrf import ensure_csrf_cookie
from mitxmako.shortcuts import render_to_response
from django.core.context_processors import csrf
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import Location
from xmodule.error_module import ErrorDescriptor
from contentstore.utils import get_lms_link_for_item
from util.json_request import JsonResponse
from auth.authz import (
STAFF_ROLE_NAME, INSTRUCTOR_ROLE_NAME, get_course_groupname_for_role)
from course_creators.views import (
get_course_creator_status, add_user_with_status_unrequested,
user_requested_access)
from .access import has_access
from student.models import CourseEnrollment
@login_required
@ensure_csrf_cookie
def index(request):
"""
List all courses available to the logged in user
"""
courses = modulestore('direct').get_items(['i4x', None, None, 'course', None])
# filter out courses that we don't have access to
def course_filter(course):
return (has_access(request.user, course.location)
# TODO remove this condition when templates purged from db
and course.location.course != 'templates'
and course.location.org != ''
and course.location.course != ''
and course.location.name != '')
courses = filter(course_filter, courses)
def format_course_for_view(course):
return (
course.display_name,
reverse("course_index", kwargs={
'org': course.location.org,
'course': course.location.course,
'name': course.location.name,
}),
get_lms_link_for_item(
course.location
),
course.display_org_with_default,
course.display_number_with_default,
course.location.name
)
return render_to_response('index.html', {
'courses': [format_course_for_view(c) for c in courses if not isinstance(c, ErrorDescriptor)],
'user': request.user,
'request_course_creator_url': reverse('request_course_creator'),
'course_creator_status': _get_course_creator_status(request.user),
'csrf': csrf(request)['csrf_token']
})
@require_POST
@login_required
def request_course_creator(request):
"""
User has requested course creation access.
"""
user_requested_access(request.user)
return JsonResponse({"Status": "OK"})
@login_required
@ensure_csrf_cookie
def manage_users(request, org, course, name):
'''
This view will return all CMS users who are editors for the specified course
'''
location = Location('i4x', org, course, 'course', name)
# check that logged in user has permissions to this item
if not has_access(request.user, location, role=INSTRUCTOR_ROLE_NAME) and not has_access(request.user, location, role=STAFF_ROLE_NAME):
raise PermissionDenied()
course_module = modulestore().get_item(location)
staff_groupname = get_course_groupname_for_role(location, "staff")
staff_group, __ = Group.objects.get_or_create(name=staff_groupname)
inst_groupname = get_course_groupname_for_role(location, "instructor")
inst_group, __ = Group.objects.get_or_create(name=inst_groupname)
return render_to_response('manage_users.html', {
'context_course': course_module,
'staff': staff_group.user_set.all(),
'instructors': inst_group.user_set.all(),
'allow_actions': has_access(request.user, location, role=INSTRUCTOR_ROLE_NAME),
})
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def course_team_user(request, org, course, name, email):
location = Location('i4x', org, course, 'course', name)
# check that logged in user has permissions to this item
if has_access(request.user, location, role=INSTRUCTOR_ROLE_NAME):
# instructors have full permissions
pass
elif has_access(request.user, location, role=STAFF_ROLE_NAME) and email == request.user.email:
# staff can only affect themselves
pass
else:
msg = {
"error": _("Insufficient permissions")
}
return JsonResponse(msg, 400)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
msg = {
"error": _("Could not find user by email address '{email}'.").format(email=email),
}
return JsonResponse(msg, 404)
# role hierarchy: "instructor" has more permissions than "staff" (in a course)
roles = ["instructor", "staff"]
if request.method == "GET":
# just return info about the user
msg = {
"email": user.email,
"active": user.is_active,
"role": None,
}
# what's the highest role that this user has?
groupnames = set(g.name for g in user.groups.all())
for role in roles:
role_groupname = get_course_groupname_for_role(location, role)
if role_groupname in groupnames:
msg["role"] = role
break
return JsonResponse(msg)
# can't modify an inactive user
if not user.is_active:
msg = {
"error": _('User {email} has registered but has not yet activated his/her account.').format(email=email),
}
return JsonResponse(msg, 400)
# make sure that the role groups exist
groups = {}
for role in roles:
groupname = get_course_groupname_for_role(location, role)
group, __ = Group.objects.get_or_create(name=groupname)
groups[role] = group
if request.method == "DELETE":
# remove all roles in this course from this user: but fail if the user
# is the last instructor in the course team
instructors = set(groups["instructor"].user_set.all())
staff = set(groups["staff"].user_set.all())
if user in instructors and len(instructors) == 1:
msg = {
"error": _("You may not remove the last instructor from a course")
}
return JsonResponse(msg, 400)
if user in instructors:
user.groups.remove(groups["instructor"])
if user in staff:
user.groups.remove(groups["staff"])
user.save()
return JsonResponse()
# all other operations require the requesting user to specify a role
if request.META.get("CONTENT_TYPE", "").startswith("application/json") and request.body:
try:
payload = json.loads(request.body)
except ValueError:
return JsonResponse({"error": _("malformed JSON")}, 400)
try:
role = payload["role"]
except KeyError:
return JsonResponse({"error": _("`role` is required")}, 400)
else:
if not "role" in request.POST:
return JsonResponse({"error": _("`role` is required")}, 400)
role = request.POST["role"]
if role == "instructor":
if not has_access(request.user, location, role=INSTRUCTOR_ROLE_NAME):
msg = {
"error": _("Only instructors may create other instructors")
}
return JsonResponse(msg, 400)
user.groups.add(groups["instructor"])
user.save()
# auto-enroll the course creator in the course so that "View Live" will work.
CourseEnrollment.enroll(user, location.course_id)
elif role == "staff":
# if we're trying to downgrade a user from "instructor" to "staff",
# make sure we have at least one other instructor in the course team.
instructors = set(groups["instructor"].user_set.all())
if user in instructors:
if len(instructors) == 1:
msg = {
"error": _("You may not remove the last instructor from a course")
}
return JsonResponse(msg, 400)
user.groups.remove(groups["instructor"])
user.groups.add(groups["staff"])
user.save()
# auto-enroll the course creator in the course so that "View Live" will work.
CourseEnrollment.enroll(user, location.course_id)
return JsonResponse()
def _get_course_creator_status(user):
"""
Helper method for returning the course creator status for a particular user,
taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.
If the user passed in has not previously visited the index page, it will be
added with status 'unrequested' if the course creator group is in use.
"""
if user.is_staff:
course_creator_status = 'granted'
elif settings.MITX_FEATURES.get('DISABLE_COURSE_CREATION', False):
course_creator_status = 'disallowed_for_this_site'
elif settings.MITX_FEATURES.get('ENABLE_CREATOR_GROUP', False):
course_creator_status = get_course_creator_status(user)
if course_creator_status is None:
# User not grandfathered in as an existing user, has not previously visited the dashboard page.
# Add the user to the course creator admin table with status 'unrequested'.
add_user_with_status_unrequested(user)
course_creator_status = get_course_creator_status(user)
else:
course_creator_status = 'granted'
return course_creator_status
|
FreekingDean/insteon-hub
|
refs/heads/master
|
insteon/api.py
|
1
|
from urllib.parse import urlencode
from threading import Lock
import json
import sys
import requests
import time
API_URL = "https://connect.insteon.com"
class APIError(Exception):
"""API Error Response
Attributes:
data -- the decoded JSON error response returned by the API
"""
def __init__(self, data):
self.data = data
class InsteonAPI(object):
def __init__(self, authorizer, client_id, user_agent):
self.authorizer = authorizer
self.user_agent = user_agent
self.client_id = client_id
self._stream_device_callbacks = {}
self._lock = Lock()
def get(self, path, data=''):
'''Perform GET Request'''
if len(data) != 0:
parameter_string = ''
for k,v in data.items():
parameter_string += '{}={}'.format(k,v)
parameter_string += '&'
path += '?' + parameter_string
response = requests.get(API_URL + path, headers=self._set_headers())
return self._check_response(response, self.get, path, data)
def post(self, path, data={}):
'''Perform POST Request '''
response = requests.post(API_URL + path, data=json.dumps(data), headers=self._set_headers())
return self._check_response(response, self.post, path, data)
def put(self, path, data={}):
'''Perform PUT Request'''
response = requests.put(API_URL + path, data=json.dumps(data), headers=self._set_headers())
return self._check_response(response, self.put, path, data)
def delete(self, path, data={}):
'''Perform DELETE Request'''
if len(data) != 0:
parameter_string = ''
for k,v in data.items():
parameter_string += '{}={}'.format(k,v)
parameter_string += '&'
path += '?' + parameter_string
response = requests.delete(API_URL + path, headers=self._set_headers())
return self._check_response(response, self.delete, path, data)
def stream(self, path, devices_to_watch={}):
headers = self._set_headers()
headers['Content-Type'] = 'text/event-stream'
response = None
try:
while True:
response = requests.get(API_URL + path, headers = headers, stream=True)
for line in response.iter_lines():
# filter out keep-alive new lines
if line:
decoded_line = line.decode('utf-8')
payload = decoded_line.split(': ')
self._handle_stream_message(payload[0], payload[1], devices_to_watch)
except Exception as e:
print(e)
if response is not None:
response.connection.close()
def _add_device_callback_for_stream(self, device, callback):
self._lock.acquire()
try:
self._stream_device_callbacks[device.DeviceID] = callback
finally:
self._lock.release()
def _handle_stream_message(self, message_type, payload, devices_to_watch):
self.stream_message_mappings[message_type](self, payload, devices_to_watch)
def _set_stream_event(self, event_name, *_):
self._current_stream_event = event_name
def _handle_stream_data(self, data, devices_to_watch):
parsed_data = json.loads(data)
changed_device = next([x for x in devices_to_watch if x.InsteonID == parsed_data['device_insteon_id']].__iter__(), None)
if changed_device is not None:
changed_device.set_status(parsed_data['status'])
self._lock.acquire()
try:
if changed_device.DeviceID in self._stream_device_callbacks:
self._stream_device_callbacks[changed_device.DeviceID](parsed_data['status'])
finally:
self._lock.release()
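# Dispatch table used by _handle_stream_message: server-sent "event" lines record the
# current event name, while "data" lines carry a JSON payload describing a device change.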
stream_message_mappings = {'event': _set_stream_event, 'data': _handle_stream_data}
def _check_response(self, response, calling_method, path, data={}):
if response.status_code >= 400:
if response.status_code == 401 and response.json()['code'] == 4012:
self.authorizer.authorize()
calling_method(path, data)
else:
raise APIError(response.json())
if response.status_code == 204:
return True
return response.json()
def _set_headers(self):
return {
"Content-Type": "application/json",
"Authentication": "APIKey " + self.client_id,
"Authorization": "Bearer " + self.authorizer.access_token
}
@classmethod
def unauth_post(cls, path, data):
headers = {"Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(API_URL + '/api/v2/oauth2/token', data=data, headers=headers)
return response.json()
class InsteonResource(object):
base_path="/api/v2/"
@classmethod
def all(cls, api):
resources = []
try:
response = api.get(cls.base_path + cls.resource_name, {'properties':'all'})
for data in response[cls.resource_name[:-1].title()+"List"]:
resources.append(cls(api, data[cls.resource_name[:-1].title()+"ID"], data))
return resources
except APIError as e:
print("API error: ")
for key, value in e.data.items():
print(str(key) + ": " + str(value))
def __init__(self, api, resource_id=None, data=None):
for data_key in self._properties:
setattr(self, "_" + data_key, None)
self._resource_id = resource_id
self._api_iface = api
self._cached_status = None
if data:
self._update_details(data)
else:
self.reload_details()
def __getattr__(self, name):
if name in self._properties:
return getattr(self, "_"+name)
else:
print(name)
raise AttributeError
def __setattr__(self, name, value):
if name in self._properties:
if name in self._settables:
self.__dict__["_"+name] = value
else:
raise "Property not settable"
else:
self.__dict__[name] = value
def _update_details(self, data):
#Intakes dict of details, and sets necessary properties in device
for api_name in self._properties:
if api_name in data:
setattr(self, "_" + api_name, data[api_name])
def reload_details(self):
#Query hub and refresh all properties
try:
data = self._api_iface.get(self.base_path+ self.resource_name + "/" + str(self._resource_id))
print(data)
self._update_details(data)
except APIError as e:
print("API error: ")
for key, value in e.data.items():
print(str(key) + ": " + str(value))
def save(self):
data = {}
for settable_name in self._settables:
data[settable_name] = getattr(self, settable_name)
try:
return self._api_iface.put(self.base_path + self.resource_name + "/" + str(self._resource_id), data=data)
except APIError as e:
print("API error: ")
for key,value in e.data.items():
print(str(key) + ": " + str(value))
def set_status(self, status):
self._cached_status = status
@property
def status(self):
return self._cached_status
@property
def json(self):
json_data = {}
for attribute in self._properties:
json_data[attribute] = getattr(self, "_" + attribute)
return json.dumps(json_data)
class InsteonCommandable(InsteonResource):
command_path = "commands"
def send_command(self, command, payload=None, level=None, wait=False):
data = {
'device_id': getattr(self, "DeviceID"),
'command': command
}
if command in ['on', 'off', 'fast_on', 'fast_off']:
self.set_status(command)
if payload:
for key in payload:
data[key] = payload[key]
if level:
data['level'] = level
try:
command_info = self._api_iface.post(self.base_path + self.command_path, data)
if wait:
commandId = command_info['id']
commandStatus = command_info['status']
while commandStatus == 'pending':
time.sleep(0.4)
command_info = self._api_iface.get(self.base_path + self.command_path + "/" + str(commandId))
commandStatus = command_info['status']
return command_info
except APIError as e:
print("API error: executing command " + str(command) + " on " + self.DeviceName)
print(vars(e))
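# --- Illustrative sketch, not part of this module ---
# Concrete resources are expected to subclass InsteonResource / InsteonCommandable and
# define the class attributes read above; the names below are assumptions used only for
# illustration.
#
#   class Device(InsteonCommandable):
#       resource_name = 'devices'
#       _properties = ['DeviceID', 'DeviceName', 'InsteonID']
#       _settables = ['DeviceName']
#
#   api = InsteonAPI(authorizer, client_id='my-client-id', user_agent='example-agent')
#   devices = Device.all(api)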
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/cognitiveservices/azure-cognitiveservices-search-newssearch/azure/cognitiveservices/search/newssearch/models/_news_search_client_enums.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ErrorCode(str, Enum):
none = "None"
server_error = "ServerError"
invalid_request = "InvalidRequest"
rate_limit_exceeded = "RateLimitExceeded"
invalid_authorization = "InvalidAuthorization"
insufficient_authorization = "InsufficientAuthorization"
class ErrorSubCode(str, Enum):
unexpected_error = "UnexpectedError"
resource_error = "ResourceError"
not_implemented = "NotImplemented"
parameter_missing = "ParameterMissing"
parameter_invalid_value = "ParameterInvalidValue"
http_not_allowed = "HttpNotAllowed"
blocked = "Blocked"
authorization_missing = "AuthorizationMissing"
authorization_redundancy = "AuthorizationRedundancy"
authorization_disabled = "AuthorizationDisabled"
authorization_expired = "AuthorizationExpired"
class Freshness(str, Enum):
day = "Day"
week = "Week"
month = "Month"
class SafeSearch(str, Enum):
off = "Off"
moderate = "Moderate"
strict = "Strict"
class TextFormat(str, Enum):
raw = "Raw"
html = "Html"
|
Celedhrim/persomov
|
refs/heads/master
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/firstpost.py
|
35
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class FirstpostIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?firstpost\.com/[^/]+/.*-(?P<id>[0-9]+)\.html'
_TEST = {
'url': 'http://www.firstpost.com/india/india-to-launch-indigenous-aircraft-carrier-monday-1025403.html',
'md5': 'ee9114957692f01fb1263ed87039112a',
'info_dict': {
'id': '1025403',
'ext': 'mp4',
'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
'description': 'md5:feef3041cb09724e0bdc02843348f5f4',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id)
title = self._html_search_meta('twitter:title', page, 'title')
description = self._html_search_meta('twitter:description', page, 'description')
data = self._download_xml(
'http://www.firstpost.com/getvideoxml-%s.xml' % video_id, video_id,
'Downloading video XML')
item = data.find('./playlist/item')
thumbnail = item.find('./image').text
formats = [
{
'url': details.find('./file').text,
'format_id': details.find('./label').text.strip(),
'width': int(details.find('./width').text.strip()),
'height': int(details.find('./height').text.strip()),
} for details in item.findall('./source/file_details') if details.find('./file').text
]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats,
}
|
unaizalakain/django
|
refs/heads/master
|
tests/gis_tests/geo3d/views.py
|
6027
|
# Create your views here.
|
rabipanda/tensorflow
|
refs/heads/master
|
tensorflow/contrib/py2tf/converters/continue_canonicalization.py
|
2
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes continue statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import templates
class ContinueCanonicalizationTransformer(gast.NodeTransformer):
"""Canonicalizes continue statements into additional conditionals."""
def __init__(self, namer):
self.namer = namer
# This is a stack structure, to correctly process nested loops.
self.continuation_uses = []
def _create_continuation_check(self):
template = """
if not var_name:
pass
"""
cond, = templates.replace(template, var_name=self.continuation_uses[-1][1])
cond.body = []
return cond
def _create_continuation_trigger(self):
template = """
var_name = True
"""
assign, = templates.replace(
template, var_name=self.continuation_uses[-1][1])
return assign
def _create_continuation_init(self):
template = """
var_name = False
"""
assign, = templates.replace(
template, var_name=self.continuation_uses[-1][1])
return assign
def _visit_and_reindent_if_necessary(self, nodes):
reorganized_nodes = []
current_dest = reorganized_nodes
continue_used_in_block = False
for i, n in enumerate(nodes):
# TODO(mdan): This could be optimized if control structures are simple.
self.continuation_uses[-1][0] = False
n = self.visit(n)
current_dest.append(n)
if self.continuation_uses[-1][0]:
continue_used_in_block = True
if i < len(nodes) - 1: # Last statement in block needs no protection.
cond = self._create_continuation_check()
current_dest.append(cond)
current_dest = cond.body
self.continuation_uses[-1][0] = continue_used_in_block
return reorganized_nodes
def _process_loop_block(self, block, scope):
cont_var = self.namer.new_symbol('cont_requested', scope.referenced)
self.continuation_uses.append([False, cont_var])
block = self._visit_and_reindent_if_necessary(block)
if self.continuation_uses[-1][0]:
block.insert(0, self._create_continuation_init())
self.continuation_uses.pop()
return block
def visit_While(self, node):
self.generic_visit(node.test)
node.body = self._process_loop_block(node.body,
anno.getanno(node, 'body_scope'))
for n in node.orelse:
self.generic_visit(n)
return node
def visit_For(self, node):
self.generic_visit(node.target)
self.generic_visit(node.iter)
node.body = self._process_loop_block(node.body,
anno.getanno(node, 'body_scope'))
for n in node.orelse:
self.generic_visit(n)
return node
def visit_If(self, node):
if self.continuation_uses:
self.generic_visit(node.test)
node.body = self._visit_and_reindent_if_necessary(node.body)
continue_used_in_body = self.continuation_uses[-1][0]
node.orelse = self._visit_and_reindent_if_necessary(node.orelse)
self.continuation_uses[-1][0] = (
continue_used_in_body or self.continuation_uses[-1][0])
else:
node = self.generic_visit(node)
return node
def visit_Continue(self, node):
self.continuation_uses[-1][0] = True
return self._create_continuation_trigger()
def visit_Break(self, node):
assert False, 'break statement should be desugared at this point'
def transform(node, namer):
transformer = ContinueCanonicalizationTransformer(namer)
node = transformer.visit(node)
return node
|
Qalthos/ansible
|
refs/heads/devel
|
test/units/modules/cloud/docker/test_docker_swarm_service.py
|
15
|
import pytest
class APIErrorMock(Exception):
def __init__(self, message, response=None, explanation=None):
self.message = message
self.response = response
self.explanation = explanation
@pytest.fixture(autouse=True)
def docker_module_mock(mocker):
docker_module_mock = mocker.MagicMock()
docker_utils_module_mock = mocker.MagicMock()
docker_errors_module_mock = mocker.MagicMock()
docker_errors_module_mock.APIError = APIErrorMock
mock_modules = {
'docker': docker_module_mock,
'docker.utils': docker_utils_module_mock,
'docker.errors': docker_errors_module_mock,
}
return mocker.patch.dict('sys.modules', **mock_modules)
@pytest.fixture(autouse=True)
def docker_swarm_service():
from ansible.modules.cloud.docker import docker_swarm_service
return docker_swarm_service
def test_retry_on_out_of_sequence_error(mocker, docker_swarm_service):
run_mock = mocker.MagicMock(
side_effect=APIErrorMock(
message='',
response=None,
explanation='rpc error: code = Unknown desc = update out of sequence',
)
)
manager = docker_swarm_service.DockerServiceManager(client=None)
manager.run = run_mock
with pytest.raises(APIErrorMock):
manager.run_safe()
assert run_mock.call_count == 3
def test_no_retry_on_general_api_error(mocker, docker_swarm_service):
run_mock = mocker.MagicMock(
side_effect=APIErrorMock(message='', response=None, explanation='some error')
)
manager = docker_swarm_service.DockerServiceManager(client=None)
manager.run = run_mock
with pytest.raises(APIErrorMock):
manager.run_safe()
assert run_mock.call_count == 1
def test_get_docker_environment(mocker, docker_swarm_service):
env_file_result = {'TEST1': 'A', 'TEST2': 'B', 'TEST3': 'C'}
env_dict = {'TEST3': 'CC', 'TEST4': 'D'}
env_string = "TEST3=CC,TEST4=D"
env_list = ['TEST3=CC', 'TEST4=D']
expected_result = sorted(['TEST1=A', 'TEST2=B', 'TEST3=CC', 'TEST4=D'])
mocker.patch.object(
docker_swarm_service, 'parse_env_file', return_value=env_file_result
)
mocker.patch.object(
docker_swarm_service,
'format_environment',
side_effect=lambda d: ['{0}={1}'.format(key, value) for key, value in d.items()],
)
# Test with env dict and file
result = docker_swarm_service.get_docker_environment(
env_dict, env_files=['dummypath']
)
assert result == expected_result
# Test with env list and file
result = docker_swarm_service.get_docker_environment(
env_list,
env_files=['dummypath']
)
assert result == expected_result
# Test with env string and file
result = docker_swarm_service.get_docker_environment(
env_string, env_files=['dummypath']
)
assert result == expected_result
# Test with empty env
result = docker_swarm_service.get_docker_environment(
[], env_files=None
)
assert result == []
# Test with empty env_files
result = docker_swarm_service.get_docker_environment(
None, env_files=[]
)
assert result == []
def test_get_nanoseconds_from_raw_option(docker_swarm_service):
value = docker_swarm_service.get_nanoseconds_from_raw_option('test', None)
assert value is None
value = docker_swarm_service.get_nanoseconds_from_raw_option('test', '1m30s535ms')
assert value == 90535000000
value = docker_swarm_service.get_nanoseconds_from_raw_option('test', 10000000000)
assert value == 10000000000
with pytest.raises(ValueError):
docker_swarm_service.get_nanoseconds_from_raw_option('test', [])
def test_has_dict_changed(docker_swarm_service):
assert not docker_swarm_service.has_dict_changed(
{"a": 1},
{"a": 1},
)
assert not docker_swarm_service.has_dict_changed(
{"a": 1},
{"a": 1, "b": 2}
)
assert docker_swarm_service.has_dict_changed(
{"a": 1},
{"a": 2, "b": 2}
)
assert docker_swarm_service.has_dict_changed(
{"a": 1, "b": 1},
{"a": 1}
)
assert not docker_swarm_service.has_dict_changed(
None,
{"a": 2, "b": 2}
)
assert docker_swarm_service.has_dict_changed(
{},
{"a": 2, "b": 2}
)
assert docker_swarm_service.has_dict_changed(
{"a": 1},
{}
)
assert docker_swarm_service.has_dict_changed(
{"a": 1},
None
)
assert not docker_swarm_service.has_dict_changed(
{},
{}
)
assert not docker_swarm_service.has_dict_changed(
None,
None
)
assert not docker_swarm_service.has_dict_changed(
{},
None
)
assert not docker_swarm_service.has_dict_changed(
None,
{}
)
def test_has_list_of_dicts_changed(docker_swarm_service):
assert docker_swarm_service.has_list_of_dicts_changed(
[
{"a": 1},
{"b": 1}
],
[
{"a": 1}
]
)
assert docker_swarm_service.has_list_of_dicts_changed(
[
{"a": 1},
],
[
{"a": 1},
{"b": 1},
]
)
assert not docker_swarm_service.has_list_of_dicts_changed(
[
{"a": 1},
{"b": 1},
],
[
{"a": 1},
{"b": 1}
]
)
assert not docker_swarm_service.has_list_of_dicts_changed(
None,
[
{"b": 1},
{"a": 1}
]
)
assert docker_swarm_service.has_list_of_dicts_changed(
[],
[
{"b": 1},
{"a": 1}
]
)
assert not docker_swarm_service.has_list_of_dicts_changed(
None,
None
)
assert not docker_swarm_service.has_list_of_dicts_changed(
[],
None
)
assert not docker_swarm_service.has_list_of_dicts_changed(
None,
[]
)
assert not docker_swarm_service.has_list_of_dicts_changed(
[
{"src": 1, "dst": 2},
{"src": 1, "dst": 2, "protocol": "udp"},
],
[
{"src": 1, "dst": 2, "protocol": "tcp"},
{"src": 1, "dst": 2, "protocol": "udp"},
]
)
assert not docker_swarm_service.has_list_of_dicts_changed(
[
{"src": 1, "dst": 2, "protocol": "udp"},
{"src": 1, "dst": 3, "protocol": "tcp"},
],
[
{"src": 1, "dst": 2, "protocol": "udp"},
{"src": 1, "dst": 3, "protocol": "tcp"},
]
)
assert docker_swarm_service.has_list_of_dicts_changed(
[
{"src": 1, "dst": 2, "protocol": "udp"},
{"src": 1, "dst": 2},
{"src": 3, "dst": 4},
],
[
{"src": 1, "dst": 3, "protocol": "udp"},
{"src": 1, "dst": 2, "protocol": "tcp"},
{"src": 3, "dst": 4, "protocol": "tcp"},
]
)
assert docker_swarm_service.has_list_of_dicts_changed(
[
{"src": 1, "dst": 3, "protocol": "tcp"},
{"src": 1, "dst": 2, "protocol": "udp"},
],
[
{"src": 1, "dst": 2, "protocol": "tcp"},
{"src": 1, "dst": 2, "protocol": "udp"},
]
)
assert docker_swarm_service.has_list_of_dicts_changed(
[
{"src": 1, "dst": 2, "protocol": "udp"},
{"src": 1, "dst": 2, "protocol": "tcp", "extra": {"test": "foo"}},
],
[
{"src": 1, "dst": 2, "protocol": "udp"},
{"src": 1, "dst": 2, "protocol": "tcp"},
]
)
|
praekeltfoundation/docker-xylem
|
refs/heads/master
|
twisted/plugins/docker_xylem_plugin.py
|
1
|
import yaml
from zope.interface import implements
from twisted.python import filepath, usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from twisted.application import internet
from twisted.web import server
from docker_xylem import service
class Options(usage.Options):
optParameters = [
["config", "c", "xylem-plugin.yml", "Config file"],
]
class DockerServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "docker_xylem"
description = "A docker plugin service for xylem"
options = Options
def makeService(self, options):
config = yaml.safe_load(open(options['config']))
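# Docker discovers volume plugins through sockets placed under /run/docker/plugins/,
# so default to that directory; the 'socket' config key below can override the path.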
sockfp = filepath.FilePath("/run/docker/plugins/xylem.sock")
if not sockfp.parent().exists():
sockfp.parent().makedirs()
return internet.UNIXServer(
config.get('socket', sockfp.path),
server.Site(service.DockerService(config)))
serviceMaker = DockerServiceMaker()
|
Ictp/indico
|
refs/heads/master
|
bin/migration/mapAspects.py
|
1
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from indico.core.db import DBMgr
from MaKaC.rb_location import Location, MapAspect
from MaKaC.plugins.RoomBooking.default.factory import Factory
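# One-off migration helper: the pre-defined map aspects (map views) below are attached
# to the 'CERN' room-booking location further down.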
aspects = [
{'id': 0, 'name':'Meyrin', 'centerLatitude': 46.23456689405093, 'centerLongitude': 6.046686172485352, 'topLeftLatitude': '46.225660710473136', 'topLeftLongitude': '6.030035018920898', 'bottomRightLatitude': '46.2434716324829', 'bottomRightLongitude': '6.063294410705566', 'zoomLevel':15, 'defaultOnStartup': True},
{'id': 1, 'name':'PREVESSIN', 'centerLatitude': 46.259051447415175, 'centerLongitude': 6.057773351931246, 'topLeftLatitude': '46.2501492379416', 'topLeftLongitude': '6.041107177734375', 'bottomRightLatitude': '46.26795221179669', 'bottomRightLongitude': '6.074366569519043', 'zoomLevel':15, 'defaultOnStartup': False},
{'id': 2, 'name':'POINT 1', 'centerLatitude': 46.23573201283012, 'centerLongitude': 6.054509639707248, 'topLeftLatitude': '46.23350564968721', 'topLeftLongitude': '6.050344705581665', 'bottomRightLatitude': '46.23795828565159', 'bottomRightLongitude': '6.058659553527832', 'zoomLevel':17, 'defaultOnStartup': False},
{'id': 3, 'name':'POINT 2', 'centerLatitude': 46.25115822762375, 'centerLongitude': 6.020456314054172, 'topLeftLatitude': '46.24893249040227', 'topLeftLongitude': '6.016291379928589', 'bottomRightLatitude': '46.253383874525866', 'bottomRightLongitude': '6.024606227874756', 'zoomLevel':17, 'defaultOnStartup': False},
{'id': 4, 'name':'POINT 5', 'centerLatitude': 46.30958858268458, 'centerLongitude': 6.077267646724067, 'topLeftLatitude': '46.30736521774798', 'topLeftLongitude': '6.073100566864014', 'bottomRightLatitude': '46.31181185731005', 'bottomRightLongitude': '6.081415414810181', 'zoomLevel':17, 'defaultOnStartup': False},
{'id': 5, 'name':'POINT 6', 'centerLatitude': 46.29345231426436, 'centerLongitude': 6.1115119456917455, 'topLeftLatitude': '46.29122829396059', 'topLeftLongitude': '6.107347011566162', 'bottomRightLatitude': '46.295676244254715', 'bottomRightLongitude': '6.115661859512329', 'zoomLevel':17, 'defaultOnStartup': False},
{'id': 6, 'name':'POINT 8', 'centerLatitude': 46.24158691675184, 'centerLongitude': 6.097038745847385, 'topLeftLatitude': '46.2393607911537', 'topLeftLongitude': '6.092873811721802', 'bottomRightLatitude': '46.24381295202931', 'bottomRightLongitude': '6.101188659667969', 'zoomLevel':17, 'defaultOnStartup': False},
]
DBMgr.getInstance().startRequest()
Factory.getDALManager().connect()
location = Location.parse('CERN')
for aspectData in aspects:
aspect = MapAspect()
aspect.updateFromDictionary(aspectData)
location.addAspect(aspect)
DBMgr.getInstance().endRequest()
|
web30s/odoo-9.0c-20160402
|
refs/heads/master
|
hello/templates/openerp/addons/bus/models/__init__.py
|
67
|
# -*- coding: utf-8 -*-
import bus
import bus_presence
import res_users
import res_partner
|
CacaoMovil/guia-de-cacao-django
|
refs/heads/master
|
cacao_app/cacao/templatetags/cacao_tags.py
|
1
|
from pyquery import PyQuery as pq
from django import template
from django.conf import settings
from django.core.cache import get_cache
cache = get_cache('default')
register = template.Library()
@register.simple_tag
def settings_value(name):
'''
{% settings_value "LANGUAGE_CODE" %}
'''
return getattr(settings, name, "")
@register.filter
def offline_media(value):
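# When building the offline (Perseus) version, rewrite /media/ URLs to static/ and
# remember every image URL seen, presumably so the referenced files can be collected later.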
if getattr(settings, "USE_PERSEUS", False):
new_val = value.replace('/media/', 'static/')
imgs = pq(new_val)('img')
media_urls = cache.get('media_urls') or []
for img in imgs:
src = pq(img).attr('src').replace('static/', '')
if src not in media_urls:
media_urls.append(src)
cache.set('media_urls', media_urls)
return new_val
else:
return value
@register.assignment_tag
def use_perseus():
return settings.USE_PERSEUS
|
PieterMostert/Lipgloss
|
refs/heads/master
|
view/other_restriction_editor.py
|
1
|
# LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: pi.mostert@gmail.com
import tkinter.messagebox
from numbers import Number
from .dragmanager import *
from .main_window import MainWindow
from .vert_scrolled_frame import VerticalScrolledFrame
from .pretty_names import prettify
class DisplayOtherRestriction:
"""A class used to display the line corresponding to a restriction in the other restriction editor"""
def __init__(self, index, core_data, frame):
ot = core_data.other_dict[index]
self.delete_button = ttk.Button(master=frame, text='X', width=2)
self.name_entry = Entry(master=frame, width=20)
self.numerator_coefs_entry = Entry(master=frame, width=30)
self.normalization_entry = Entry(master=frame, width=30)
self.def_low_entry = Entry(master=frame, width=12)
self.def_upp_entry = Entry(master=frame, width=12)
self.dec_pt_entry = Entry(master=frame, width=10)
self.name_entry.insert(0, ot.name)
self.numerator_coefs_entry.insert(0, ot.numerator_coefs)
self.normalization_entry.insert(0, ot.normalization)
self.def_low_entry.insert(0, ot.def_low)
self.def_upp_entry.insert(0, ot.def_upp)
self.dec_pt_entry.insert(0, ot.dec_pt)
def display(self, pos, order):
self.delete_button.grid(row=pos, column=0)
self.name_entry.grid(row=pos, column=1, padx=3, pady=3)
self.numerator_coefs_entry.grid(row=pos, column=2, padx=3, pady=3)
self.normalization_entry.grid(row=pos, column=3, padx=3, pady=3)
self.def_low_entry.grid(row=pos, column=4, padx=3, pady=3)
self.def_upp_entry.grid(row=pos, column=5, padx=3, pady=3)
self.dec_pt_entry.grid(row=pos, column=6, padx=3, pady=3)
def delete(self):
for widget in [self.delete_button, self.name_entry, self.numerator_coefs_entry, self.normalization_entry, \
self.def_low_entry, self.def_upp_entry, self.dec_pt_entry]:
widget.destroy()
class OtherRestrictionEditor(MainWindow):
"""Window that lets users enter / delete other restrictions, and rearrange the order in which they are displayed"""
def __init__(self, core_data, order, reorder_other_restrictions):
self.toplevel = Toplevel()
self.toplevel.title("Other Restriction Editor")
self.other_restriction_editor_headings = Frame(self.toplevel)
self.other_restriction_editor_headings.pack()
self.i_e_scrollframe = VerticalScrolledFrame(self.toplevel)
self.i_e_scrollframe.frame_height = 200
self.i_e_scrollframe.pack()
other_restriction_editor_buttons = Frame(self.toplevel)
other_restriction_editor_buttons.pack()
# Place the headings on the other_restriction_editor. There is some not-entirely-successful fiddling involved to try
# to get the headings to match up with their respective columns:
Label(master=self.other_restriction_editor_headings, text='', width=5).grid(row=0, column=0) # Blank label above the delete buttons
Label(master=self.other_restriction_editor_headings, text='', width=5).grid(row=0, column=1) # Blank label above the delete buttons
Label(master=self.other_restriction_editor_headings, text=' Restriction Name', width=20).grid(row=0, column=2)
Label(master=self.other_restriction_editor_headings, text='Numerator Coefficients', width=30).grid(row=0, column=3)
Label(master=self.other_restriction_editor_headings, text='Normalization', width=20).grid(row=0, column=4)
Label(master=self.other_restriction_editor_headings, text='Def lower bnd', width=12).grid(row=0, column=5)
Label(master=self.other_restriction_editor_headings, text='Def upper bnd', width=12).grid(row=0, column=6)
Label(master=self.other_restriction_editor_headings, text='Dec places', width=10).grid(row=0, column=7)
Label(master=self.other_restriction_editor_headings, text='', width=5).grid(row=0, column=8) # Blank label above the scrollbar
Label(master=self.other_restriction_editor_headings, text='', width=5).grid(row=0, column=9) # Blank label above the scrollbar
# Create drag manager for restriction rows:
self.ing_dnd = DragManager(reorder_other_restrictions)
# Create and display the rows:
self.display_other_restrictions = {}
for r, i in enumerate(order["other"]):
self.display_other_restrictions[i] = DisplayOtherRestriction(i, core_data, self.i_e_scrollframe.interior)
self.display_other_restrictions[i].display(r, order)
self.ing_dnd.add_dragable(self.display_other_restrictions[i].name_entry) # This lets you drag the row corresponding to a restriction by right-clicking on its name
# This label is a hack to make sure that when a new other restriction is added, you don't have to scroll down to see it:
Label(master=self.i_e_scrollframe.interior).grid(row=9000)
self.new_other_restr_button = ttk.Button(other_restriction_editor_buttons, text='New restriction', width=20)
self.new_other_restr_button.pack(side='left')
self.update_button = ttk.Button(other_restriction_editor_buttons, text='Update', width=20)
self.update_button.pack(side='right')
self.i_e_scrollframe.interior.focus_force()
def new_other_restriction(self, i, core_data, order):
self.display_other_restrictions[i] = DisplayOtherRestriction(i, core_data, self.i_e_scrollframe.interior)
self.display_other_restrictions[i].display(int(i), order)
self.ing_dnd.add_dragable(self.display_other_restrictions[i].name_entry)
|
ilexius/odoo
|
refs/heads/master
|
addons/mrp_byproduct/__init__.py
|
45
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import mrp_byproduct
|
robmadole/jig
|
refs/heads/develop
|
src/jig/commands/tests/test_install.py
|
1
|
# coding=utf-8
from os.path import join
from textwrap import dedent
from jig.exc import ForcedExit
from jig.tests.testcase import (
CommandTestCase, PluginTestCase, cd_gitrepo, result_with_hint)
from jig.commands.hints import USE_RUNNOW
from jig.commands import install
class TestInstallCommand(CommandTestCase, PluginTestCase):
"""
Test the install command.
"""
command = install.Command
def setUp(self):
super(TestInstallCommand, self).setUp()
self.plugin01_dir = join(self.fixturesdir, 'plugin01')
self.plugin02_dir = join(self.fixturesdir, 'plugin02')
@cd_gitrepo
def test_plugins_file_does_not_exist(self):
"""
The specified file does not exist.
"""
with self.assertRaises(ForcedExit):
self.run_command('badfilename.txt')
self.assertResults(
'No such file or directory',
self.error)
@cd_gitrepo
def test_install_local_plugin(self):
"""
Can install a single local file system plugin.
"""
# Create a file to store the location of plugins to install
self.commit(
self.gitrepodir, 'jigplugins.txt',
self.plugin01_dir)
self.run_command('jigplugins.txt')
self.assertResults(
result_with_hint(dedent(
u'''
From {0}:
- Added plugin plugin01 in bundle test01
'''.format(self.plugin01_dir)),
USE_RUNNOW),
self.output)
@cd_gitrepo
def test_skips_duplicates(self):
"""
If a duplicate is being installed, skips it.
"""
self.commit(
self.gitrepodir, 'jigplugins.txt',
'{0}\n{0}\n'.format(self.plugin01_dir))
self.run_command('jigplugins.txt')
self.assertResults(
result_with_hint(dedent(
u'''
From {0}:
- Added plugin plugin01 in bundle test01
From {0}:
- The plugin is already installed.
'''.format(self.plugin01_dir)),
USE_RUNNOW),
self.output)
@cd_gitrepo
def test_plugins_has_one_error(self):
"""
The specified file exists but the first location is a bad plugin.
Confirms that the install continues even if an error is found.
"""
self.commit(
self.gitrepodir, 'jigplugins.txt',
'{0}\n{1}\n'.format(
self.plugin02_dir, self.plugin01_dir))
self.run_command('jigplugins.txt')
self.assertResults(result_with_hint(dedent(
u'''
From {0}:
- File contains parsing errors: {0}/config.cfg
\t[line 2]: 'This is a bad config file that will fail to parse\\n'
From {1}:
- Added plugin plugin01 in bundle test01
'''.format(self.plugin02_dir, self.plugin01_dir)), USE_RUNNOW),
self.output)
|
tracierenea/gnuradio
|
refs/heads/master
|
gr-digital/python/digital/gmsk.py
|
7
|
#
# GMSK modulation and demodulation.
#
#
# Copyright 2005-2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
from math import pi
from pprint import pprint
import inspect
import numpy
from gnuradio import gr, blocks, analog, filter
import modulation_utils
import digital_swig as digital
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_bt = 0.35
_def_verbose = False
_def_log = False
_def_gain_mu = None
_def_mu = 0.5
_def_freq_error = 0.0
_def_omega_relative_limit = 0.005
# FIXME: Figure out how to make GMSK work with pfb_arb_resampler_fff for both
# transmit and receive so we don't require integer samples per symbol.
# /////////////////////////////////////////////////////////////////////////////
# GMSK modulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_mod(gr.hier_block2):
"""
Hierarchical block for Gaussian Minimum Shift Key (GMSK)
modulation.
The input is a byte stream (unsigned char with packed bits)
and the output is the complex modulated signal at baseband.
Args:
samples_per_symbol: samples per baud >= 2 (integer)
bt: Gaussian filter bandwidth * symbol time (float)
verbose: Print information about modulator? (boolean)
log: Print modulation data to files? (boolean)
"""
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
bt=_def_bt,
verbose=_def_verbose,
log=_def_log):
gr.hier_block2.__init__(self, "gmsk_mod",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
samples_per_symbol = int(samples_per_symbol)
self._samples_per_symbol = samples_per_symbol
self._bt = bt
self._differential = False
if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
raise TypeError, ("samples_per_symbol must be an integer >= 2, is %r" % (samples_per_symbol,))
ntaps = 4 * samples_per_symbol # up to 3 bits in filter at once
sensitivity = (pi / 2) / samples_per_symbol # phase change per bit = pi / 2
# Turn it into NRZ data.
#self.nrz = digital.bytes_to_syms()
self.unpack = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST)
self.nrz = digital.chunks_to_symbols_bf([-1, 1], 1)
# Form Gaussian filter
# Generate Gaussian response (Needs to be convolved with window below).
self.gaussian_taps = filter.firdes.gaussian(
1, # gain
samples_per_symbol, # symbol_rate
bt, # bandwidth * symbol time
ntaps # number of taps
)
self.sqwave = (1,) * samples_per_symbol # rectangular window
self.taps = numpy.convolve(numpy.array(self.gaussian_taps),numpy.array(self.sqwave))
self.gaussian_filter = filter.interp_fir_filter_fff(samples_per_symbol, self.taps)
# FM modulation
self.fmmod = analog.frequency_modulator_fc(sensitivity)
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.unpack, self.nrz, self.gaussian_filter, self.fmmod, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method.
def _print_verbage(self):
print "bits per symbol = %d" % self.bits_per_symbol()
print "Gaussian filter bt = %.2f" % self._bt
def _setup_logging(self):
print "Modulation logging turned on."
self.connect(self.nrz,
blocks.file_sink(gr.sizeof_float, "nrz.dat"))
self.connect(self.gaussian_filter,
blocks.file_sink(gr.sizeof_float, "gaussian_filter.dat"))
self.connect(self.fmmod,
blocks.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))
def add_options(parser):
"""
Adds GMSK modulation-specific options to the standard parser
"""
parser.add_option("", "--bt", type="float", default=_def_bt,
help="set bandwidth-time product [default=%default] (GMSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(gmsk_mod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
# /////////////////////////////////////////////////////////////////////////////
# GMSK demodulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_demod(gr.hier_block2):
"""
Hierarchical block for Gaussian Minimum Shift Key (GMSK)
demodulation.
The input is the complex modulated signal at baseband.
The output is a stream of bits packed 1 bit per byte (the LSB)
Args:
samples_per_symbol: samples per baud (integer)
gain_mu: controls rate of mu adjustment (float)
mu: fractional delay [0.0, 1.0] (float)
omega_relative_limit: sets max variation in omega (float)
freq_error: bit rate error as a fraction (float)
verbose: Print information about modulator? (boolean)
log: Print modulation data to files? (boolean)
"""
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
gain_mu=_def_gain_mu,
mu=_def_mu,
omega_relative_limit=_def_omega_relative_limit,
freq_error=_def_freq_error,
verbose=_def_verbose,
log=_def_log):
gr.hier_block2.__init__(self, "gmsk_demod",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._gain_mu = gain_mu
self._mu = mu
self._omega_relative_limit = omega_relative_limit
self._freq_error = freq_error
self._differential = False
if samples_per_symbol < 2:
raise TypeError, "samples_per_symbol >= 2, is %f" % samples_per_symbol
self._omega = samples_per_symbol*(1+self._freq_error)
if not self._gain_mu:
self._gain_mu = 0.175
self._gain_omega = .25 * self._gain_mu * self._gain_mu # critically damped
# Demodulate FM
sensitivity = (pi / 2) / samples_per_symbol
self.fmdemod = analog.quadrature_demod_cf(1.0 / sensitivity)
# the clock recovery block tracks the symbol clock and resamples as needed.
# the output of the block is a stream of soft symbols (float)
self.clock_recovery = digital.clock_recovery_mm_ff(self._omega, self._gain_omega,
self._mu, self._gain_mu,
self._omega_relative_limit)
# slice the floats at 0, outputting 1 bit (the LSB of the output byte) per sample
self.slicer = digital.binary_slicer_fb()
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.fmdemod, self.clock_recovery, self.slicer, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method.
def _print_verbage(self):
print "bits per symbol = %d" % self.bits_per_symbol()
print "M&M clock recovery omega = %f" % self._omega
print "M&M clock recovery gain mu = %f" % self._gain_mu
print "M&M clock recovery mu = %f" % self._mu
print "M&M clock recovery omega rel. limit = %f" % self._omega_relative_limit
print "frequency error = %f" % self._freq_error
def _setup_logging(self):
print "Demodulation logging turned on."
self.connect(self.fmdemod,
blocks.file_sink(gr.sizeof_float, "fmdemod.dat"))
self.connect(self.clock_recovery,
blocks.file_sink(gr.sizeof_float, "clock_recovery.dat"))
self.connect(self.slicer,
blocks.file_sink(gr.sizeof_char, "slicer.dat"))
def add_options(parser):
"""
Adds GMSK demodulation-specific options to the standard parser
"""
parser.add_option("", "--gain-mu", type="float", default=_def_gain_mu,
help="M&M clock recovery gain mu [default=%default] (GMSK/PSK)")
parser.add_option("", "--mu", type="float", default=_def_mu,
help="M&M clock recovery mu [default=%default] (GMSK/PSK)")
parser.add_option("", "--omega-relative-limit", type="float", default=_def_omega_relative_limit,
help="M&M clock recovery omega relative limit [default=%default] (GMSK/PSK)")
parser.add_option("", "--freq-error", type="float", default=_def_freq_error,
help="M&M clock recovery frequency error [default=%default] (GMSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(gmsk_demod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
#
# Add these to the mod/demod registry
#
modulation_utils.add_type_1_mod('gmsk', gmsk_mod)
modulation_utils.add_type_1_demod('gmsk', gmsk_demod)
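# Illustrative usage sketch (not part of this file): once registered above, the blocks can
# be dropped into a flowgraph directly, e.g.
#
#   tx = gmsk_mod(samples_per_symbol=4, bt=0.35)
#   rx = gmsk_demod(samples_per_symbol=4)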
|
shaufi10/odoo
|
refs/heads/8.0
|
openerp/addons/base/module/wizard/__init__.py
|
365
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_module_update
import base_language_install
import base_import_language
import base_module_upgrade
import base_module_configuration
import base_export_language
import base_update_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mschmittfull/nbodykit
|
refs/heads/master
|
nbodykit/core/algorithms/PairCountCorrelation.py
|
2
|
from nbodykit.core import Algorithm, DataSource
import numpy
import os
def binning_type(s):
"""
Type conversion for use on the command-line that converts
a string to an array of bin edges
"""
if os.path.isfile(s):
return numpy.loadtxt(s)
else:
supported = ["`linspace: min max Nbins`", "`logspace: logmin logmax Nbins`"]
try:
f, params = s.split(':')
params = list(map(float, params.split()))
params[-1] = int(params[-1]) + 1
if not hasattr(numpy, f): raise Exception
if len(params) != 3: raise Exception
return getattr(numpy, f)(*params)
except:
raise TypeError("supported binning format: [ %s ]" %", ".join(supported))
class PairCountCorrelationAlgorithm(Algorithm):
"""
Algorithm to compute the 1d or 2d correlation function and/or multipoles
via direct pair counting
Notes
-----
The algorithm saves the correlation function result to a plaintext file,
as well as the meta-data associated with the algorithm. The names of the
columns saved to file are:
- r :
the mean separation in each `r` bin
- mu : 2D corr only
the mean value for each `mu` bin
- corr :
the correlation function value
- corr_X :
the `X` multipole of the correlation function
- RR :
the number of random-random pairs in each bin; used to
properly normalize the correlation function
- N :
the number of pairs averaged over in each bin to compute
the correlation function
"""
plugin_name = "PairCountCorrelation"
def __init__(self, mode, rbins, field, other=None, subsample=1,
los='z', Nmu=10, poles=[]):
# positional arguments
self.mode = mode
self.rbins = rbins
self.field = field
# keyword arguments
self.other = other
self.subsample = subsample
self.los = los
self.Nmu = Nmu
self.poles = poles
# construct the input fields list
self.inputs = [self.field]
if self.other is not None:
self.inputs.append(self.other)
@classmethod
def fill_schema(cls):
s = cls.schema
s.description = "correlation function calculator via pair counting"
# the positional arguments
s.add_argument("mode", type=str, choices=["1d", "2d"],
help='measure the correlation function in `1d` or `2d`')
s.add_argument("rbins", type=binning_type,
help='the string specifying the binning to use')
s.add_argument("field", type=DataSource.from_config,
help='the first `DataSource` of objects to correlate; '
'run `nbkit.py --list-datasources` for all options')
s.add_argument("other", type=DataSource.from_config,
help='the other `DataSource` of objects to cross-correlate with; '
'run `nbkit.py --list-datasources` for all options')
s.add_argument("subsample", type=int, help='use 1 out of every N points')
s.add_argument("los", choices="xyz",
help="the line-of-sight: the angle `mu` is defined with respect to")
s.add_argument("Nmu", type=int,
help='if `mode == 2d`, the number of mu bins covering mu=[-1,1]')
s.add_argument('poles', nargs='*', type=int,
help='compute the multipoles for these `ell` values from xi(r,mu)')
def run(self):
"""
Run the pair-count correlation function and return the result
Returns
-------
edges : list or array_like
the array of 1d bin edges or a list of the bin edges in each dimension
result : dict
dictionary holding the data results (with associated names as keys) --
this includes `corr`, `RR`, `N` + the mean bin values
"""
from nbodykit import measurestats
# check multipoles parameters
if len(self.poles) and self.mode == '2d':
raise ValueError("you specified multipole numbers but `mode` is `2d` -- perhaps you meant `1d`")
# set Nmu to 1 if doing 1d
if self.mode == "1d": self.Nmu = 0
# do the work
kw = {'comm':self.comm, 'subsample':self.subsample, 'Nmu':self.Nmu, 'los':self.los, 'poles':self.poles}
pc, xi, RR = measurestats.compute_brutal_corr(self.inputs, self.rbins, **kw)
# format the results
if self.mode == '1d':
if len(self.poles):
cols = ['r'] + ['corr_%d' %l for l in self.poles] + ['RR', 'N']
result = [pc.mean_centers] + [xi[:,i] for i in range(len(self.poles))] + [RR, pc.pair_counts]
else:
cols = ['r', 'corr', 'RR', 'N']
result = [pc.mean_centers, xi, RR, pc.pair_counts]
else:
cols = ['r', 'mu', 'corr', 'RR', 'N']
r, mu = pc.mean_centers
result = [r, mu, xi, RR, pc.pair_counts]
return pc.edges, dict(zip(cols, result))
def save(self, output, result):
"""
Save the result returned by `run()` to the filename specified by `output`
Parameters
----------
output : str
the string specifying the file to save
result : tuple
the tuple returned by `run()` -- first argument specifies the bin
edges and the second is a dictionary holding the data results
"""
from nbodykit.storage import MeasurementStorage
# only master writes
if self.comm.rank == 0:
edges, result = result
storage = MeasurementStorage.create(self.mode, output)
cols = list(result.keys())
values = list(result.values())
storage.write(edges, cols, values)
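# A hedged sketch of consuming run()/save() (column names as documented in the
# class docstring; `algo` stands for a configured PairCountCorrelationAlgorithm):
#   edges, result = algo.run()
#   r, xi = result['r'], result['corr']     # 1d mode without multipoles
#   algo.save('xi.txt', (edges, result))    # only MPI rank 0 writes the file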
|
teslaji/homebase
|
refs/heads/master
|
venv/HomeBase/lib/python3.5/site-packages/django/views/static.py
|
46
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
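# A hedged usage sketch: for a request header such as
#   If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=4096
# was_modified_since(header, mtime, size) returns False (the 304 path in
# serve()) only when the optional length matches `size` and `mtime` is not
# newer than the parsed header time; any parse failure falls back to True.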
|
sharad/calibre
|
refs/heads/master
|
src/calibre/gui2/actions/choose_library.py
|
1
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, posixpath, weakref
from functools import partial
from PyQt5.Qt import (QMenu, Qt, QInputDialog, QToolButton, QDialog,
QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QIcon, QSize,
QCoreApplication, pyqtSignal, QVBoxLayout, QTimer)
from calibre import isbytestring, sanitize_file_name_unicode
from calibre.constants import (filesystem_encoding, iswindows,
get_portable_base)
from calibre.utils.config import prefs, tweaks
from calibre.utils.icu import sort_key
from calibre.gui2 import (gprefs, warning_dialog, Dispatcher, error_dialog,
question_dialog, info_dialog, open_local_file, choose_dir)
from calibre.gui2.actions import InterfaceAction
def db_class():
from calibre.db.legacy import LibraryDatabase
return LibraryDatabase
class LibraryUsageStats(object): # {{{
def __init__(self):
self.stats = {}
self.read_stats()
base = get_portable_base()
if base is not None:
lp = prefs['library_path']
if lp:
# Rename the current library. Renaming of other libraries is
# handled by the switch function
q = os.path.basename(lp)
for loc in list(self.stats.iterkeys()):
bn = posixpath.basename(loc)
if bn.lower() == q.lower():
self.rename(loc, lp)
def read_stats(self):
stats = gprefs.get('library_usage_stats', {})
self.stats = stats
def write_stats(self):
locs = list(self.stats.keys())
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
reverse=True)
for key in locs[500:]:
self.stats.pop(key)
gprefs.set('library_usage_stats', self.stats)
def remove(self, location):
self.stats.pop(location, None)
self.write_stats()
def canonicalize_path(self, lpath):
if isbytestring(lpath):
lpath = lpath.decode(filesystem_encoding)
lpath = lpath.replace(os.sep, '/')
return lpath
def library_used(self, db):
lpath = self.canonicalize_path(db.library_path)
if lpath not in self.stats:
self.stats[lpath] = 0
self.stats[lpath] += 1
self.write_stats()
return self.pretty(lpath)
def locations(self, db):
lpath = self.canonicalize_path(db.library_path)
locs = list(self.stats.keys())
if lpath in locs:
locs.remove(lpath)
limit = tweaks['many_libraries']
key = sort_key if len(locs) > limit else lambda x:self.stats[x]
locs.sort(key=key, reverse=len(locs)<=limit)
for loc in locs:
yield self.pretty(loc), loc
def pretty(self, loc):
if loc.endswith('/'):
loc = loc[:-1]
return loc.split('/')[-1]
def rename(self, location, newloc):
newloc = self.canonicalize_path(newloc)
stats = self.stats.pop(location, None)
if stats is not None:
self.stats[newloc] = stats
self.write_stats()
# }}}
class MovedDialog(QDialog): # {{{
def __init__(self, stats, location, parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle(_('No library found'))
self._l = l = QGridLayout(self)
self.setLayout(l)
self.stats, self.location = stats, location
loc = self.oldloc = location.replace('/', os.sep)
self.header = QLabel(_('No existing calibre library was found at %s. '
'If the library was moved, select its new location below. '
'Otherwise calibre will forget this library.')%loc)
self.header.setWordWrap(True)
ncols = 2
l.addWidget(self.header, 0, 0, 1, ncols)
self.cl = QLabel('<br><b>'+_('New location of this library:'))
l.addWidget(self.cl, 1, 0, 1, ncols)
self.loc = QLineEdit(loc, self)
l.addWidget(self.loc, 2, 0, 1, 1)
self.cd = QToolButton(self)
self.cd.setIcon(QIcon(I('document_open.png')))
self.cd.clicked.connect(self.choose_dir)
l.addWidget(self.cd, 2, 1, 1, 1)
self.bb = QDialogButtonBox(QDialogButtonBox.Abort)
b = self.bb.addButton(_('Library moved'), self.bb.AcceptRole)
b.setIcon(QIcon(I('ok.png')))
b = self.bb.addButton(_('Forget library'), self.bb.RejectRole)
b.setIcon(QIcon(I('edit-clear.png')))
b.clicked.connect(self.forget_library)
self.bb.accepted.connect(self.accept)
self.bb.rejected.connect(self.reject)
l.addWidget(self.bb, 3, 0, 1, ncols)
self.resize(self.sizeHint() + QSize(100, 50))
def choose_dir(self):
d = choose_dir(self, 'library moved choose new loc',
_('New library location'), default_dir=self.oldloc)
if d is not None:
self.loc.setText(d)
def forget_library(self):
self.stats.remove(self.location)
def accept(self):
newloc = unicode(self.loc.text())
if not db_class().exists_at(newloc):
error_dialog(self, _('No library found'),
_('No existing calibre library found at %s')%newloc,
show=True)
return
self.stats.rename(self.location, newloc)
self.newloc = newloc
QDialog.accept(self)
# }}}
class BackupStatus(QDialog): # {{{
def __init__(self, gui):
QDialog.__init__(self, gui)
self.l = l = QVBoxLayout(self)
self.msg = QLabel('')
self.msg.setWordWrap(True)
l.addWidget(self.msg)
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Close)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
b = bb.addButton(_('Queue &all books for backup'), bb.ActionRole)
b.clicked.connect(self.mark_all_dirty)
b.setIcon(QIcon(I('lt.png')))
l.addWidget(bb)
self.db = weakref.ref(gui.current_db)
self.setResult(9)
self.setWindowTitle(_('Backup status'))
self.update()
self.resize(self.sizeHint() + QSize(50, 15))
def update(self):
db = self.db()
if db is None:
return
if self.result() != 9:
return
dirty_text = 'no'
try:
dirty_text = '%s' % db.dirty_queue_length()
except:
dirty_text = _('none')
self.msg.setText('<p>' +
_('Book metadata files remaining to be written: %s') % dirty_text)
QTimer.singleShot(1000, self.update)
def mark_all_dirty(self):
db = self.db()
db.new_api.mark_as_dirty(db.new_api.all_book_ids())
# }}}
class ChooseLibraryAction(InterfaceAction):
name = 'Choose Library'
action_spec = (_('Choose Library'), 'lt.png',
_('Choose calibre library to work with'), None)
dont_add_to = frozenset(['context-menu-device'])
action_add_menu = True
action_menu_clone_qaction = _('Switch/create library...')
restore_view_state = pyqtSignal(object)
def genesis(self):
self.count_changed(0)
self.action_choose = self.menuless_qaction
self.stats = LibraryUsageStats()
self.popup_type = (QToolButton.InstantPopup if len(self.stats.stats) > 1 else
QToolButton.MenuButtonPopup)
if len(self.stats.stats) > 1:
self.action_choose.triggered.connect(self.choose_library)
else:
self.qaction.triggered.connect(self.choose_library)
self.choose_menu = self.qaction.menu()
ac = self.create_action(spec=(_('Pick a random book'), 'random.png',
None, None), attr='action_pick_random')
ac.triggered.connect(self.pick_random)
if not os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH', None):
self.choose_menu.addAction(self.action_choose)
self.quick_menu = QMenu(_('Quick switch'))
self.quick_menu_action = self.choose_menu.addMenu(self.quick_menu)
self.rename_menu = QMenu(_('Rename library'))
self.rename_menu_action = self.choose_menu.addMenu(self.rename_menu)
self.choose_menu.addAction(ac)
self.delete_menu = QMenu(_('Remove library'))
self.delete_menu_action = self.choose_menu.addMenu(self.delete_menu)
else:
self.choose_menu.addAction(ac)
self.rename_separator = self.choose_menu.addSeparator()
self.switch_actions = []
for i in range(5):
ac = self.create_action(spec=('', None, None, None),
attr='switch_action%d'%i)
self.switch_actions.append(ac)
ac.setVisible(False)
ac.triggered.connect(partial(self.qs_requested, i),
type=Qt.QueuedConnection)
self.choose_menu.addAction(ac)
self.rename_separator = self.choose_menu.addSeparator()
self.maintenance_menu = QMenu(_('Library Maintenance'))
ac = self.create_action(spec=(_('Library metadata backup status'),
'lt.png', None, None), attr='action_backup_status')
ac.triggered.connect(self.backup_status, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
ac = self.create_action(spec=(_('Check library'), 'lt.png',
None, None), attr='action_check_library')
ac.triggered.connect(self.check_library, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
ac = self.create_action(spec=(_('Restore database'), 'lt.png',
None, None),
attr='action_restore_database')
ac.triggered.connect(self.restore_database, type=Qt.QueuedConnection)
self.maintenance_menu.addAction(ac)
self.choose_menu.addMenu(self.maintenance_menu)
self.view_state_map = {}
self.restore_view_state.connect(self._restore_view_state,
type=Qt.QueuedConnection)
@property
def preserve_state_on_switch(self):
ans = getattr(self, '_preserve_state_on_switch', None)
if ans is None:
self._preserve_state_on_switch = ans = \
self.gui.library_view.preserve_state(require_selected_ids=False)
return ans
def pick_random(self, *args):
self.gui.iactions['Pick Random Book'].pick_random()
def library_name(self):
db = self.gui.library_view.model().db
path = db.library_path
if isbytestring(path):
path = path.decode(filesystem_encoding)
path = path.replace(os.sep, '/')
return self.stats.pretty(path)
def update_tooltip(self, count):
tooltip = self.action_spec[2] + '\n\n' + _('{0} [{1} books]').format(
getattr(self, 'last_lname', ''), count)
a = self.qaction
a.setToolTip(tooltip)
a.setStatusTip(tooltip)
a.setWhatsThis(tooltip)
def library_changed(self, db):
lname = self.stats.library_used(db)
self.last_lname = lname
if len(lname) > 16:
lname = lname[:16] + u'…'
a = self.qaction
a.setText(lname)
self.update_tooltip(db.count())
self.build_menus()
state = self.view_state_map.get(self.stats.canonicalize_path(
db.library_path), None)
if state is not None:
self.restore_view_state.emit(state)
def _restore_view_state(self, state):
self.preserve_state_on_switch.state = state
def initialization_complete(self):
self.library_changed(self.gui.library_view.model().db)
def build_menus(self):
if os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH', None):
return
db = self.gui.library_view.model().db
locations = list(self.stats.locations(db))
for ac in self.switch_actions:
ac.setVisible(False)
self.quick_menu.clear()
self.qs_locations = [i[1] for i in locations]
self.rename_menu.clear()
self.delete_menu.clear()
quick_actions, rename_actions, delete_actions = [], [], []
for name, loc in locations:
ac = self.quick_menu.addAction(name, Dispatcher(partial(self.switch_requested,
loc)))
ac.setStatusTip(_('Switch to: %s') % loc)
quick_actions.append(ac)
ac = self.rename_menu.addAction(name, Dispatcher(partial(self.rename_requested,
name, loc)))
rename_actions.append(ac)
ac.setStatusTip(_('Rename: %s') % loc)
ac = self.delete_menu.addAction(name, Dispatcher(partial(self.delete_requested,
name, loc)))
delete_actions.append(ac)
ac.setStatusTip(_('Remove: %s') % loc)
qs_actions = []
for i, x in enumerate(locations[:len(self.switch_actions)]):
name, loc = x
ac = self.switch_actions[i]
ac.setText(name)
ac.setStatusTip(_('Switch to: %s') % loc)
ac.setVisible(True)
qs_actions.append(ac)
self.quick_menu_action.setVisible(bool(locations))
self.rename_menu_action.setVisible(bool(locations))
self.delete_menu_action.setVisible(bool(locations))
self.gui.location_manager.set_switch_actions(quick_actions,
rename_actions, delete_actions, qs_actions,
self.action_choose)
# Allow the cloned actions in the OS X global menubar to update
for a in (self.qaction, self.menuless_qaction):
a.changed.emit()
def location_selected(self, loc):
enabled = loc == 'library'
self.qaction.setEnabled(enabled)
def rename_requested(self, name, location):
LibraryDatabase = db_class()
loc = location.replace('/', os.sep)
base = os.path.dirname(loc)
newname, ok = QInputDialog.getText(self.gui, _('Rename') + ' ' + name,
'<p>'+_('Choose a new name for the library <b>%s</b>. ')%name +
'<p>'+_('Note that the actual library folder will be renamed.'),
text=name)
newname = sanitize_file_name_unicode(unicode(newname))
if not ok or not newname or newname == name:
return
newloc = os.path.join(base, newname)
if os.path.exists(newloc):
return error_dialog(self.gui, _('Already exists'),
_('The folder %s already exists. Delete it first.') %
newloc, show=True)
if (iswindows and len(newloc) >
LibraryDatabase.WINDOWS_LIBRARY_PATH_LIMIT):
return error_dialog(self.gui, _('Too long'),
_('Path to library too long. Must be less than'
' %d characters.')%LibraryDatabase.WINDOWS_LIBRARY_PATH_LIMIT,
show=True)
if not os.path.exists(loc):
error_dialog(self.gui, _('Not found'),
_('Cannot rename as no library was found at %s. '
'Try switching to this library first, then switch back '
'and retry the renaming.')%loc, show=True)
return
try:
os.rename(loc, newloc)
except:
import traceback
det_msg = 'Location: %r New Location: %r\n%s'%(loc, newloc,
traceback.format_exc())
error_dialog(self.gui, _('Rename failed'),
_('Failed to rename the library at %s. '
'The most common cause for this is if one of the files'
' in the library is open in another program.') % loc,
det_msg=det_msg, show=True)
return
self.stats.rename(location, newloc)
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
def delete_requested(self, name, location):
loc = location.replace('/', os.sep)
if not question_dialog(
self.gui, _('Library removed'), _(
'The library %s has been removed from calibre. '
'The files remain on your computer, if you want '
'to delete them, you will have to do so manually.') % ('<code>%s</code>' % loc),
override_icon='dialog_information.png',
yes_text=_('&OK'), no_text=_('&Undo'), yes_icon='ok.png', no_icon='edit-undo.png'):
return
self.stats.remove(location)
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
if os.path.exists(loc):
open_local_file(loc)
def backup_status(self, location):
self.__backup_status_dialog = d = BackupStatus(self.gui)
d.show()
def mark_dirty(self):
db = self.gui.library_view.model().db
db.dirtied(list(db.data.iterallids()))
info_dialog(self.gui, _('Backup metadata'),
_('Metadata will be backed up while calibre is running, at the '
'rate of approximately 1 book every three seconds.'), show=True)
def restore_database(self):
LibraryDatabase = db_class()
m = self.gui.library_view.model()
db = m.db
if (iswindows and len(db.library_path) >
LibraryDatabase.WINDOWS_LIBRARY_PATH_LIMIT):
return error_dialog(self.gui, _('Too long'),
_('Path to library too long. Must be less than'
' %d characters. Move your library to a location with'
' a shorter path using Windows Explorer, then point'
' calibre to the new location and try again.')%
LibraryDatabase.WINDOWS_LIBRARY_PATH_LIMIT,
show=True)
from calibre.gui2.dialogs.restore_library import restore_database
m = self.gui.library_view.model()
m.stop_metadata_backup()
db = m.db
db.prefs.disable_setting = True
if restore_database(db, self.gui):
self.gui.library_moved(db.library_path, call_close=False)
def check_library(self):
from calibre.gui2.dialogs.check_library import CheckLibraryDialog, DBCheck
self.gui.library_view.save_state()
m = self.gui.library_view.model()
m.stop_metadata_backup()
db = m.db
db.prefs.disable_setting = True
d = DBCheck(self.gui, db)
d.start()
try:
d.conn.close()
except:
pass
d.break_cycles()
self.gui.library_moved(db.library_path, call_close=not
d.closed_orig_conn)
if d.rejected:
return
if d.error is None:
if not question_dialog(self.gui, _('Success'),
_('Found no errors in your calibre library database.'
' Do you want calibre to check if the files in your '
' library match the information in the database?')):
return
else:
return error_dialog(self.gui, _('Failed'),
_('Database integrity check failed, click Show details'
' for details.'), show=True, det_msg=d.error[1])
self.gui.status_bar.show_message(
_('Starting library scan, this may take a while'))
try:
QCoreApplication.processEvents()
d = CheckLibraryDialog(self.gui, m.db)
if not d.do_exec():
info_dialog(self.gui, _('No problems found'),
_('The files in your library match the information '
'in the database.'), show=True)
finally:
self.gui.status_bar.clear_message()
def look_for_portable_lib(self, db, location):
base = get_portable_base()
if base is None:
return False, None
loc = location.replace('/', os.sep)
candidate = os.path.join(base, os.path.basename(loc))
if db.exists_at(candidate):
newloc = candidate.replace(os.sep, '/')
self.stats.rename(location, newloc)
return True, newloc
return False, None
def switch_requested(self, location):
if not self.change_library_allowed():
return
db = self.gui.library_view.model().db
current_lib = self.stats.canonicalize_path(db.library_path)
self.view_state_map[current_lib] = self.preserve_state_on_switch.state
loc = location.replace('/', os.sep)
exists = db.exists_at(loc)
if not exists:
exists, new_location = self.look_for_portable_lib(db, location)
if exists:
location = new_location
loc = location.replace('/', os.sep)
if not exists:
d = MovedDialog(self.stats, location, self.gui)
ret = d.exec_()
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
if ret == d.Accepted:
loc = d.newloc.replace('/', os.sep)
else:
return
# from calibre.utils.mem import memory
# import weakref
# from PyQt5.Qt import QTimer
# self.dbref = weakref.ref(self.gui.library_view.model().db)
# self.before_mem = memory()
self.gui.library_moved(loc, allow_rebuild=True)
# QTimer.singleShot(5000, self.debug_leak)
def debug_leak(self):
import gc
from calibre.utils.mem import memory
ref = self.dbref
for i in xrange(3):
gc.collect()
if ref() is not None:
print 'DB object alive:', ref()
for r in gc.get_referrers(ref())[:10]:
print r
print
print 'before:', self.before_mem
print 'after:', memory()
print
self.dbref = self.before_mem = None
def qs_requested(self, idx, *args):
self.switch_requested(self.qs_locations[idx])
def count_changed(self, new_count):
self.update_tooltip(new_count)
def choose_library(self, *args):
if not self.change_library_allowed():
return
from calibre.gui2.dialogs.choose_library import ChooseLibrary
self.gui.library_view.save_state()
db = self.gui.library_view.model().db
location = self.stats.canonicalize_path(db.library_path)
self.pre_choose_dialog_location = location
c = ChooseLibrary(db, self.choose_library_callback, self.gui)
c.exec_()
self.choose_dialog_library_renamed = getattr(c, 'library_renamed', False)
def choose_library_callback(self, newloc, copy_structure=False):
self.gui.library_moved(newloc, copy_structure=copy_structure,
allow_rebuild=True)
if getattr(self, 'choose_dialog_library_renamed', False):
self.stats.rename(self.pre_choose_dialog_location, prefs['library_path'])
self.build_menus()
self.gui.iactions['Copy To Library'].build_menus()
def change_library_allowed(self):
if os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH', None):
warning_dialog(self.gui, _('Not allowed'),
_('You cannot change libraries while using the environment'
' variable CALIBRE_OVERRIDE_DATABASE_PATH.'), show=True)
return False
if self.gui.job_manager.has_jobs():
warning_dialog(self.gui, _('Not allowed'),
_('You cannot change libraries while jobs'
' are running.'), show=True)
return False
return True
|
chengduoZH/Paddle
|
refs/heads/develop
|
python/paddle/fluid/tests/unittests/test_box_coder_op.py
|
5
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
import math
from op_test import OpTest
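# box_decoder/box_encoder below mirror the box_coder op's DecodeCenterSize and
# EncodeCenterSize transforms: prior boxes are converted to
# (center_x, center_y, width, height) form and the offsets are scaled by the
# per-coordinate variances pb_v (either a length-4 vector or a per-box array).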
def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0):
pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False)
pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False)
pb_x = pb_w * 0.5 + p_box[:, 0]
pb_y = pb_h * 0.5 + p_box[:, 1]
shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1)
pb_w = pb_w.reshape(shape)
pb_h = pb_h.reshape(shape)
pb_x = pb_x.reshape(shape)
pb_y = pb_y.reshape(shape)
if pb_v.ndim == 2:
var_shape = (1, pb_v.shape[0], pb_v.shape[1]) if axis == 0 else (
pb_v.shape[0], 1, pb_v.shape[1])
pb_v = pb_v.reshape(var_shape)
if pb_v.ndim == 1:
tb_x = pb_v[0] * t_box[:, :, 0] * pb_w + pb_x
tb_y = pb_v[1] * t_box[:, :, 1] * pb_h + pb_y
tb_w = np.exp(pb_v[2] * t_box[:, :, 2]) * pb_w
tb_h = np.exp(pb_v[3] * t_box[:, :, 3]) * pb_h
else:
tb_x = pb_v[:, :, 0] * t_box[:, :, 0] * pb_w + pb_x
tb_y = pb_v[:, :, 1] * t_box[:, :, 1] * pb_h + pb_y
tb_w = np.exp(pb_v[:, :, 2] * t_box[:, :, 2]) * pb_w
tb_h = np.exp(pb_v[:, :, 3] * t_box[:, :, 3]) * pb_h
output_box[:, :, 0] = tb_x - tb_w / 2
output_box[:, :, 1] = tb_y - tb_h / 2
output_box[:, :, 2] = tb_x + tb_w / 2 - (not norm)
output_box[:, :, 3] = tb_y + tb_h / 2 - (not norm)
def box_encoder(t_box, p_box, pb_v, output_box, norm):
pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False)
pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False)
pb_x = pb_w * 0.5 + p_box[:, 0]
pb_y = pb_h * 0.5 + p_box[:, 1]
shape = (1, p_box.shape[0])
pb_w = pb_w.reshape(shape)
pb_h = pb_h.reshape(shape)
pb_x = pb_x.reshape(shape)
pb_y = pb_y.reshape(shape)
if pb_v.ndim == 2:
pb_v = pb_v.reshape(1, pb_v.shape[0], pb_v.shape[1])
tb_x = ((t_box[:, 2] + t_box[:, 0]) / 2).reshape(t_box.shape[0], 1)
tb_y = ((t_box[:, 3] + t_box[:, 1]) / 2).reshape(t_box.shape[0], 1)
tb_w = (t_box[:, 2] - t_box[:, 0]).reshape(t_box.shape[0], 1) + (not norm)
tb_h = (t_box[:, 3] - t_box[:, 1]).reshape(t_box.shape[0], 1) + (not norm)
if pb_v.ndim == 1:
output_box[:, :, 0] = (tb_x - pb_x) / pb_w / pb_v[0]
output_box[:, :, 1] = (tb_y - pb_y) / pb_h / pb_v[1]
output_box[:, :, 2] = np.log(np.fabs(tb_w / pb_w)) / pb_v[2]
output_box[:, :, 3] = np.log(np.fabs(tb_h / pb_h)) / pb_v[3]
else:
output_box[:, :, 0] = (tb_x - pb_x) / pb_w / pb_v[:, :, 0]
output_box[:, :, 1] = (tb_y - pb_y) / pb_h / pb_v[:, :, 1]
output_box[:, :, 2] = np.log(np.fabs(tb_w / pb_w)) / pb_v[:, :, 2]
output_box[:, :, 3] = np.log(np.fabs(tb_h / pb_h)) / pb_v[:, :, 3]
def batch_box_coder(p_box, pb_v, t_box, lod, code_type, norm, axis=0):
n = t_box.shape[0]
m = p_box.shape[0]
if code_type == "DecodeCenterSize":
m = t_box.shape[1]
output_box = np.zeros((n, m, 4), dtype=np.float32)
cur_offset = 0
for i in range(len(lod)):
if (code_type == "EncodeCenterSize"):
box_encoder(t_box[cur_offset:(cur_offset + lod[i]), :], p_box, pb_v,
output_box[cur_offset:(cur_offset + lod[i]), :, :],
norm)
elif (code_type == "DecodeCenterSize"):
box_decoder(t_box, p_box, pb_v, output_box, norm, axis)
cur_offset += lod[i]
return output_box
class TestBoxCoderOp(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "box_coder"
lod = [[1, 1, 1, 1, 1]]
prior_box = np.random.random((81, 4)).astype('float32')
prior_box_var = np.random.random((81, 4)).astype('float32')
target_box = np.random.random((20, 81, 4)).astype('float32')
code_type = "DecodeCenterSize"
box_normalized = False
output_box = batch_box_coder(prior_box, prior_box_var, target_box,
lod[0], code_type, box_normalized)
self.inputs = {
'PriorBox': prior_box,
'PriorBoxVar': prior_box_var,
'TargetBox': target_box,
}
self.attrs = {
'code_type': 'decode_center_size',
'box_normalized': False
}
self.outputs = {'OutputBox': output_box}
class TestBoxCoderOpWithoutBoxVar(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "box_coder"
lod = [[0, 1, 2, 3, 4, 5]]
prior_box = np.random.random((81, 4)).astype('float32')
prior_box_var = np.ones((81, 4)).astype('float32')
target_box = np.random.random((20, 81, 4)).astype('float32')
code_type = "DecodeCenterSize"
box_normalized = False
output_box = batch_box_coder(prior_box, prior_box_var, target_box,
lod[0], code_type, box_normalized)
self.inputs = {
'PriorBox': prior_box,
'TargetBox': target_box,
}
self.attrs = {
'code_type': 'decode_center_size',
'box_normalized': False
}
self.outputs = {'OutputBox': output_box}
class TestBoxCoderOpWithLoD(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "box_coder"
lod = [[10, 20, 20]]
prior_box = np.random.random((20, 4)).astype('float32')
prior_box_var = np.random.random((20, 4)).astype('float32')
target_box = np.random.random((50, 4)).astype('float32')
code_type = "EncodeCenterSize"
box_normalized = True
output_box = batch_box_coder(prior_box, prior_box_var, target_box,
lod[0], code_type, box_normalized)
self.inputs = {
'PriorBox': prior_box,
'PriorBoxVar': prior_box_var,
'TargetBox': (target_box, lod),
}
self.attrs = {'code_type': 'encode_center_size', 'box_normalized': True}
self.outputs = {'OutputBox': output_box}
class TestBoxCoderOpWithAxis(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "box_coder"
lod = [[1, 1, 1, 1, 1]]
prior_box = np.random.random((30, 4)).astype('float32')
prior_box_var = np.random.random((30, 4)).astype('float32')
target_box = np.random.random((30, 81, 4)).astype('float32')
code_type = "DecodeCenterSize"
box_normalized = False
axis = 1
output_box = batch_box_coder(prior_box, prior_box_var, target_box,
lod[0], code_type, box_normalized, axis)
self.inputs = {
'PriorBox': prior_box,
'PriorBoxVar': prior_box_var,
'TargetBox': target_box,
}
self.attrs = {
'code_type': 'decode_center_size',
'box_normalized': False,
'axis': axis
}
self.outputs = {'OutputBox': output_box}
class TestBoxCoderOpWithVariance(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "box_coder"
lod = [[1, 1, 1, 1, 1]]
prior_box = np.random.random((30, 4)).astype('float32')
prior_box_var = np.random.random((4)).astype('float32')
target_box = np.random.random((30, 81, 4)).astype('float32')
code_type = "DecodeCenterSize"
box_normalized = False
axis = 1
output_box = batch_box_coder(prior_box, prior_box_var, target_box,
lod[0], code_type, box_normalized, axis)
self.inputs = {
'PriorBox': prior_box,
'TargetBox': target_box,
}
self.attrs = {
'code_type': 'decode_center_size',
'box_normalized': False,
'variance': prior_box_var.astype(np.float).flatten(),
'axis': axis
}
self.outputs = {'OutputBox': output_box}
if __name__ == '__main__':
unittest.main()
|
VHAINNOVATIONS/DmD
|
refs/heads/master
|
scrubber/MIST_2_0_4/src/CherryPy-3.1.2/cherrypy/_cpchecker.py
|
13
|
import os
import warnings
import cherrypy
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
on: set this to False to turn off the checker completely.
When this object is called at engine startup, it executes each
of its own methods whose names start with "check_". If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False:
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace check_* methods in this way.
"""
on = True
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and callable(method):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_skipped_app_config(self):
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_static_paths(self):
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.iteritems():
if isinstance(conf, dict):
for k, v in conf.iteritems():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ["wsgi"]
ns.extend(app.toolboxes.keys())
ns.extend(app.namespaces.keys())
ns.extend(app.request_class.namespaces.keys())
ns.extend(cherrypy.config.namespaces.keys())
ns += self.extra_config_namespaces
for section, conf in app.config.iteritems():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.iteritems():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
import __builtin__
builtins = [x for x in vars(__builtin__).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
vtype = type(getattr(obj, name, None))
if vtype in builtins:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.iteritems():
if isinstance(conf, dict):
for k, v in conf.iteritems():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.iteritems():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
|
sorenk/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/assemble.py
|
16
|
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
# Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import os
import os.path
import re
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb').read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b'\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != b'\n':
tmp.write(b'\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b'\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
decrypt = self._task.args.get('decrypt', True)
try:
if src is None or dest is None:
raise AnsibleActionFail("src and dest are required")
if boolean(remote_src, strict=False):
result.update(self._execute_module(task_vars=task_vars))
raise _AnsibleActionDone()
else:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
if not os.path.isdir(src):
raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args.update(
dict(
dest=dest,
original_basename=os.path.basename(src),
)
)
if path_checksum != dest_stat['checksum']:
if self._play_context.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
new_module_args.update(dict(src=xfered,))
res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)
if diff:
res['diff'] = diff
result.update(res)
else:
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
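# A hedged playbook sketch (option names handled in run() above; YAML kept in
# comments since this file is the action plugin, not the module documentation):
#   - assemble:
#       src: /etc/app/conf.d
#       dest: /etc/app/app.conf
#       delimiter: '### fragment ###'
#       remote_src: no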
|
wweiradio/django
|
refs/heads/master
|
tests/auth_tests/backend_alias.py
|
512
|
# For testing that auth backends can be referenced using a convenience import
from .test_auth_backends import ImportedModelBackend
__all__ = ['ImportedModelBackend']
|
stefanseefeld/qmtest
|
refs/heads/master
|
qm/external/DocumentTemplate/DT_If.py
|
2
|
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
__doc__='''Conditional insertion
Conditional insertion is performed using 'if' and 'else'
commands.
To include text when an object is true using the EPFS
format, use::
%(if name)[
text
%(if name)]
To include text when an object is true using the HTML
format, use::
<!--#if name-->
text
<!--#/if name-->
where 'name' is the name bound to the object.
To include text when an object is false using the EPFS
format, use::
%(else name)[
text
%(else name)]
To include text when an object is false using the HTML
format, use::
<!--#else name-->
text
<!--#/else name-->
Finally to include text when an object is true and to
include different text when the object is false using the
EPFS format, use::
%(if name)[
true text
%(if name)]
%(else name)[
false text
%(else name)]
and to include text when an object is true and to
include different text when the object is false using the
HTML format, use::
<!--#if name-->
true text
<!--#else name-->
false text
<!--#/if name-->
Notes:
- if a variable is not defined, it is considered to be false.
- A variable is only evaluated once in an 'if' tag. If the value
is used inside the tag, including in enclosed tags, the
variable is not reevaluated.
'''
__rcs_id__='$Id$'
__version__='$Revision$'[11:-2]
from DT_Util import ParseError, parse_params, name_param, str
class If:
blockContinuations='else','elif'
name='if'
elses=None
expr=''
def __init__(self, blocks):
tname, args, section = blocks[0]
args=parse_params(args, name='', expr='')
name,expr=name_param(args,'if',1)
self.__name__= name
if expr is None: cond=name
else: cond=expr.eval
sections=[cond, section.blocks]
if blocks[-1][0]=='else':
tname, args, section = blocks[-1]
del blocks[-1]
args=parse_params(args, name='')
if args:
ename,expr=name_param(args,'else',1)
if ename != name:
raise ParseError, ('name in else does not match if', 'in')
elses=section.blocks
else: elses=None
for tname, args, section in blocks[1:]:
if tname=='else':
raise ParseError, (
'more than one else tag for a single if tag', 'in')
args=parse_params(args, name='', expr='')
name,expr=name_param(args,'elif',1)
if expr is None: cond=name
else: cond=expr.eval
sections.append(cond)
sections.append(section.blocks)
if elses is not None: sections.append(elses)
self.simple_form=tuple(sections)
class Unless:
name='unless'
blockContinuations=()
def __init__(self, blocks):
tname, args, section = blocks[0]
args=parse_params(args, name='', expr='')
name,expr=name_param(args,'unless',1)
if expr is None: cond=name
else: cond=expr.eval
self.simple_form=(cond,None,section.blocks)
class Else(Unless):
# The else tag is included for backward compatibility and is deprecated.
name='else'
|
denisenkom/django
|
refs/heads/master
|
tests/generic_views/test_detail.py
|
10
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.views.generic.base import View
from .models import Artist, Author, Page
class DetailViewTest(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_simple_object(self):
res = self.client.get('/detail/obj/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], {'foo': 'bar'})
self.assertIsInstance(res.context['view'], View)
self.assertTemplateUsed(res, 'generic_views/detail.html')
def test_detail_by_pk(self):
res = self.client.get('/detail/author/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_custom_pk(self):
res = self.client.get('/detail/author/bycustompk/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_slug(self):
res = self.client.get('/detail/author/byslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_custom_slug(self):
res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_verbose_name(self):
res = self.client.get('/detail/artist/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Artist.objects.get(pk=1))
self.assertEqual(res.context['artist'], Artist.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/artist_detail.html')
def test_template_name(self):
res = self.client.get('/detail/author/1/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/about.html')
def test_template_name_suffix(self):
res = self.client.get('/detail/author/1/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['author'], Author.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/author_view.html')
def test_template_name_field(self):
res = self.client.get('/detail/page/1/field/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Page.objects.get(pk=1))
self.assertEqual(res.context['page'], Page.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/page_template.html')
def test_context_object_name(self):
res = self.client.get('/detail/author/1/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=1))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_duplicated_context_object_name(self):
res = self.client.get('/detail/author/1/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=1))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, '/detail/author/invalid/url/')
def test_invalid_queryset(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/')
def test_non_model_object_with_meta(self):
res = self.client.get('/detail/nonmodel/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'].id, "non_model_1")
|
ChungChe/pyamazonclouddrive
|
refs/heads/master
|
bin/acdput.py
|
10
|
#!/usr/bin/env python
#
# Copyright (c) 2011 anatanokeitai.com(sakurai_youhei)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os, sys, getpass
from optparse import OptionParser
try:
import pyacd
except ImportError:
pyacd_lib_dir=os.path.dirname(__file__)+os.sep+".."
if os.path.exists(pyacd_lib_dir) and os.path.isdir(pyacd_lib_dir):
sys.path.insert(0, pyacd_lib_dir)
import pyacd
parser=OptionParser(
epilog="This command uploads file(s) to your Amazon Cloud Drive. "+
"If the same named file exists, uploading file is renamed "+
"automatically. (e.g. 'test.mp3' -> 'test (2).mp3')",
usage="%prog [Options] file1 file2 - ...('-' means STDIN)",
version=pyacd.__version__
)
parser.add_option(
"--domain",dest="domain",action="store",default="www.amazon.com",
help="domain of Amazon [default: %default]"
)
parser.add_option(
"-e",dest="email",action="store",default=None,
help="email address for Amazon"
)
parser.add_option(
"-p",dest="password",action="store",default=None,
help="password for Amazon"
)
parser.add_option(
"-s",dest="session",action="store",default=None,metavar="FILE",
help="save/load login session to/from FILE"
)
parser.add_option(
"-d",dest="path",action="store",default="/",
help="upload path [default: %default]"
)
parser.add_option(
"-v",dest="verbose",action="store_true",default=False,
help="show verbose message"
)
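# Example invocation (hypothetical values, for illustration only):
#   acdput.py -e user@example.com -s ~/.acd_session -d /Backup song1.mp3 song2.mp3
# If /Backup already contains song1.mp3, the new upload is stored as
# "song1 (2).mp3", as described in the epilog above.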
def main():
opts,args=parser.parse_args(sys.argv[1:])
pyacd.set_amazon_domain(opts.domain)
args=list(set(args))
if "-" in args:
args.remove("-")
args += [x.strip() for x in sys.stdin.readlines()]
if 0==len(args):
sys.stderr.write("No file selected.\n")
parser.print_help()
sys.exit(2)
else:
for file in args:
if not os.path.exists(file):
sys.stderr.write('Not found "%s"\n'%file)
sys.exit(2)
elif os.path.isdir(file):
sys.stderr.write('"%s" is not file\n'%file)
sys.exit(2)
# Check options of authentication
if opts.email:
if not opts.password:
opts.password = getpass.getpass()
if (opts.email and opts.password) or opts.session:
pass # OK case
else:
print >>sys.stderr, "Either email and password or session is mondatory."
sys.exit(2)
session = None; s = None
if opts.session:
if opts.verbose:
print >>sys.stderr, "Loading previous session...",
try:
s=pyacd.Session.load_from_file(opts.session)
if opts.verbose:
print >>sys.stderr, "Done."
except:
s=pyacd.Session()
if opts.verbose:
print >>sys.stderr, "Failed."
if opts.verbose:
print >>sys.stderr, "Logging into %s..."%opts.domain,
try:
if opts.email and opts.password and s:
session=pyacd.login(opts.email,opts.password,session=s)
elif opts.email and opts.password:
session=pyacd.login(opts.email,opts.password)
else:
session=pyacd.login(session=s)
if opts.verbose:
print >>sys.stderr, "Done."
except:
if opts.verbose:
print >>sys.stderr, "Failed."
sys.exit(2)
# Check login status
if not session:
sys.stderr.write("Unexpected error occured.\n")
sys.exit(2)
elif not session.is_logged_in():
sys.stderr.write("Login failed.\n%s\n"%session)
sys.exit(2)
# Check destination
path=opts.path
if path[0]!='/':path='/'+path
if path[-1]!='/':path=path+'/'
try:
dest = pyacd.api.get_info_by_path(path)
if dest.Type == pyacd.types.FILE:
sys.stderr.write('"%s" is file\n'%path)
sys.exit(2)
except pyacd.PyAmazonCloudDriveApiException,e:
sys.stderr.write('"%s"\n'%e.reason)
sys.exit(2)
for f in args:
filename = os.path.basename(f)
fp=open(f,"rb")
filedata = fp.read()
fp.close()
if opts.verbose:
sys.stderr.write("Uploading %s to %s ... "%(filename,path))
fileobj = pyacd.api.create_by_path(path,filename)
try:
upload_url = pyacd.api.get_upload_url_by_id(fileobj.object_id,len(filedata))
except pyacd.PyAmazonCloudDriveError,e:
if opts.verbose:
print >>sys.stderr, "Failed."
sys.stderr.write('"%s"\n'%e)
continue
end_point=upload_url.http_request.end_point
parameters=upload_url.http_request.parameters
storage_key=upload_url.storage_key
object_id=upload_url.object_id
if opts.verbose:
sys.stderr.write("Sending data... ")
pyacd.api.upload(end_point,parameters,filename,filedata)
    # completing file upload
if opts.verbose:
sys.stderr.write("Finishing... ")
pyacd.api.complete_file_upload_by_id(object_id,storage_key)
if opts.verbose:
sys.stderr.write("Done\n")
if opts.verbose:
print >>sys.stderr, "Updating current session...",
try:
session.save_to_file(opts.session)
if opts.verbose:
print >>sys.stderr, "Done."
except:
if opts.verbose:
print >>sys.stderr, "Failed."
if __name__=="__main__":
main()
|
limbail/ceamon
|
refs/heads/master
|
pandorabox/todo/system_info.py
|
1
|
from tornado.ioloop import PeriodicCallback
from swampdragon.pubsub_providers.data_publisher import publish_data
import psutil
import requests, json
from ceamon.models import sapnode
url = "http://localhost:9988/sapnode/"
pcb = None
def broadcast_sys_info():
global pcb
if pcb is None:
pcb = PeriodicCallback(broadcast_sys_info, 3000)
pcb.start()
danger = sapnode.objects.filter(status='danger').order_by('sid').values().distinct()
danger_count=len(danger)
warning = sapnode.objects.filter(status='warning').order_by('sid').values().distinct()
warning_count=len(warning)
instancias = sapnode.objects.filter().order_by('sid').values().distinct()
instancias_count=len(instancias)
instancias_abap = sapnode.objects.filter(product='abap').order_by('sid').values().distinct()
instancias_abap_count=len(instancias_abap)
instancias_portal = sapnode.objects.filter(product='portal').order_by('sid').values().distinct()
instancias_portal_count=len(instancias_portal)
instancias_javaengine = sapnode.objects.filter(product='java_engine').order_by('sid').values().distinct()
instancias_javaengine_count=len(instancias_javaengine)
instancias_opentext = sapnode.objects.filter(product='opentext').order_by('sid').values().distinct()
instancias_opentext_count=len(instancias_opentext)
publish_data('sysinfo', {
'danger_count':danger_count,
'warning_count': warning_count,
'instancias_count': instancias_count,
'instancias_abap_count':instancias_abap_count,
'instancias_portal_count':instancias_portal_count,
'instancias_javaengine_count':instancias_javaengine_count,
'instancias_opentext_count':instancias_opentext_count,
})
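# Minimal sketch of how this publisher might be wired up (assumed usage, not part
# of the original module): calling broadcast_sys_info() once from startup code
# registers the Tornado PeriodicCallback above, after which the counters are
# re-queried and re-published on the 'sysinfo' channel every 3 seconds.
#
#   from pandorabox.todo.system_info import broadcast_sys_info
#   broadcast_sys_info()  # first call schedules itself every 3000 ms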
|
lmorchard/django
|
refs/heads/master
|
tests/postgres_tests/array_default_migrations/0001_initial.py
|
377
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='IntegerArrayDefaultModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', django.contrib.postgres.fields.ArrayField(models.IntegerField(), size=None)),
],
options={
},
bases=(models.Model,),
),
]
|
gerald-yang/ubuntu-iotivity-demo
|
refs/heads/master
|
snappy/grovepi/pygrovepi/grove_electricity_sensor.py
|
7
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Electricity Sensor (http://www.seeedstudio.com/wiki/Grove_-_Electricity_Sensor)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove Electricity Sensor to analog port A0
# SIG,NC,NC,GND
sensor = 0
grovepi.pinMode(sensor,"INPUT")
# Vcc of the grove interface is normally 5v
grove_vcc = 5
while True:
try:
# Get sensor value
sensor_value = grovepi.analogRead(sensor)
        # Calculate amplitude current (mA); convert the raw reading to float
        # first so the scaling also works under Python 2's integer division
        amplitude_current = float(sensor_value) / 1024 * grove_vcc / 800 * 2000000
# Calculate effective value (mA)
effective_value = amplitude_current / 1.414
# minimum_current = 1 / 1024 * grove_vcc / 800 * 2000000 / 1.414 = 8.6(mA)
# Only for sinusoidal alternating current
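        # Worked example (hypothetical reading, for illustration): sensor_value = 300
        #   amplitude_current = 300 / 1024.0 * 5 / 800 * 2000000 ~ 3662 mA
        #   effective_value   = 3662 / 1.414 ~ 2590 mA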
print ("sensor_value", sensor_value)
print ("The amplitude of the current is", amplitude_current, "mA")
print ("The effective value of the current is", effective_value, "mA")
time.sleep(1)
except IOError:
print ("Error")
|
tomquas/mongrel2
|
refs/heads/master
|
examples/python/mongrel2/config/commands.py
|
90
|
from mongrel2 import config
from mongrel2.config import args
import mongrel2.config.commands
from uuid import uuid4
from mongrel2.config import model
import getpass
import sys
import os
import signal
from sqlite3 import OperationalError
def try_reading(reader):
try:
cmd = reader.readline()
return cmd.split(' ')
except UnicodeDecodeError:
print "\nERROR: Sorry, PyRepl and Python hate printing to your screen: UnicodeDecodeError."
return []
def shell_command():
"""
Starts an interactive shell with readline style input so you can
work with Mongrel2 easier.
"""
try:
from pyrepl.unix_console import UnixConsole
from pyrepl.historical_reader import HistoricalReader
except:
print "You don't have PyRepl installed, shell not available."
reader = HistoricalReader(UnixConsole())
reader.ps1 = "m2> "
reader.ps2 = "..> "
reader.ps3 = "...> "
reader.ps4 = "....> "
try:
while True:
cmd = try_reading(reader)
if cmd:
try:
args.parse_and_run_command(cmd, mongrel2.config.commands)
except Exception, e:
print "ERROR:", e
except EOFError:
print "Bye."
except KeyboardInterrupt:
print "BYE!"
def help_command(**options):
"""
Prints out help for the commands.
m2sh help
You can get help for one command with:
m2sh help -for STR
"""
if "for" in options:
help_text = args.help_for_command(config.commands, options['for'])
if help_text:
print help_text
else:
args.invalid_command_message(config.commands)
else:
print "Available commands:\n"
print "\n".join(args.available_commands(config.commands))
print "\nUse config help -for <command> to find out more."
def dump_command(db=None):
"""
Simple dump of a config database:
m2sh dump -db config.sqlite
"""
print "LOADING DB: ", db
try:
if not (os.path.isfile(db) and os.access(db, os.R_OK)):
raise IOError
store = model.begin(db)
servers = store.find(model.Server)
for server in servers:
print server
for host in server.hosts:
print "\t", host
for route in host.routes:
print "\t\t", route
except IOError:
print "%s not readable" % db
except OperationalError, exc:
print "SQLite error: %s" % exc
def uuid_command(hex=False):
"""
Generates a UUID for you to use in your configurations:
m2sh uuid
m2sh uuid -hex
The -hex means to print it as a big hex number, which is
more efficient but harder to read.
"""
if hex:
print uuid4().hex
else:
print str(uuid4())
def servers_command(db=None):
"""
Lists the servers that are configured in this setup:
m2sh servers -db config.sqlite
"""
if not os.path.isfile(db):
print "ERROR: Cannot access database file %s" % db
return
try:
store = model.begin(db)
servers = store.find(model.Server)
for server in servers:
print "-------"
print server.name, server.default_host, server.uuid
for host in server.hosts:
print "\t", host.id, ':', host.name
except OperationalError, exc:
print "SQLite error: %s" % exc
def hosts_command(db=None, uuid="", host="", name=""):
"""
List all the hosts in the given server identified by UUID or host.
m2sh hosts -db config.sqlite -uuid f400bf85-4538-4f7a-8908-67e313d515c2
m2sh hosts -db config.sqlite -host localhost
m2sh hosts -db config.sqlite -name test
The -host parameter is the default_host for the server.
"""
if not (os.path.isfile(db) and os.access(db, os.R_OK)):
print "Cannot read database file %s" % db
return
try:
store = model.begin(db)
results = None
if uuid:
results = store.find(model.Server, model.Server.uuid == unicode(uuid))
elif host:
results = store.find(model.Server, model.Server.default_host == unicode(host))
elif name:
results = store.find(model.Server, model.Server.name == unicode(name))
else:
print "ERROR: Must give a -host or -uuid or -name."
return
if results.count():
server = results[0]
hosts = store.find(model.Host, model.Host.server_id == server.id)
for host in hosts:
print "--------"
print host, ":"
for route in host.routes:
print "\t", route.path, ':', route.target
else:
print "No servers found."
except OperationalError, exc:
print "SQLite error: %s" % exc
def init_command(db=None):
"""
Initializes a new config database.
m2sh init -db config.sqlite
It will obliterate this config.
"""
from pkg_resources import resource_stream
import sqlite3
sql = resource_stream('mongrel2', 'sql/config.sql').read()
if model.store:
model.store.close()
model.store = None
if os.path.isfile(db) and not os.access(db, os.W_OK):
print "Cannot access database file %s" % db
return
try:
conn = sqlite3.connect(db)
conn.executescript(sql)
commit_command(db=db, what="init_command", why=" ".join(sys.argv))
except OperationalError, exc:
print "Error: %s" % exc
def load_command(db=None, config=None, clear=True):
"""
After using init you can use this to load a config:
m2sh load -db config.sqlite -config tests/sample_conf.py
This will erase the previous config, but we'll make it
safer later on.
"""
import imp
if not (os.path.isfile(db) and os.access(db, os.R_OK)):
print "Cannot access database file %s" % db
return
try:
model.begin(db, clear=clear)
imp.load_source('mongrel2_config_main', config)
commit_command(db=db, what="load_command", why=" ".join(sys.argv))
except OperationalError, exc:
print "SQLite error: %s" % exc
except SyntaxError,exc:
print "Syntax error: %s" % exc
def config_command(db=None, config=None, clear=True):
"""
Effectively does an init then load of a config to get
you started quicker:
m2sh config -db config.sqlite -config tests/sample_conf.py
Like the other two, this will nuke your config, but we'll
make it safer later.
"""
init_command(db=db)
load_command(db=db, config=config, clear=clear)
def commit_command(db=None, what=None, why=None):
"""
    Used to add a commit event to the database for other admins to know
what is going on with the config. The system logs quite a lot
already for you, like your username, machine name, etc:
m2sh commit -db test.sqlite -what mongrel2.org \
-why "Needed to change paters."
In future versions it will prevent you from committing as root,
because only assholes commit from root.
Both parameters are arbitrary, but I like to record what I did to
different Hosts in servers.
"""
import socket
store = model.load_db("sqlite:" + db)
who = unicode(getpass.getuser())
if who == u'root':
print "Commit from root eh? Man, you're kind of a tool."
log = model.Log()
log.who = who
log.what = unicode(what)
log.why = unicode(why)
log.location = unicode(socket.gethostname())
log.how = u'm2sh'
store.add(log)
store.commit()
def log_command(db=None, count=20):
"""
Dumps commit logs:
m2sh log -db test.sqlite -count 20
m2sh log -db test.sqlite
So you know who to blame.
"""
store = model.load_db("sqlite:" + db)
logs = store.find(model.Log)
for log in logs.order_by(model.Log.happened_at)[0:count]:
print log
def find_servers(db=None, uuid="", host="", name="", every=False):
"""
Finds all the servers which match the given uuid, host or name.
If every is true all servers in the database will be returned.
"""
store = model.begin(db)
servers = []
if every:
servers = store.find(model.Server)
elif uuid:
servers = store.find(model.Server, model.Server.uuid == unicode(uuid))
elif host:
servers = store.find(model.Server, model.Server.default_host == unicode(host))
elif name:
servers = store.find(model.Server, model.Server.name == unicode(name))
if servers.count() > 1 and not every:
print "Not sure which server to run, what I found:"
print "NAME HOST UUID"
print "--------------"
for server in servers:
print server.name, server.default_host, server.uuid
print "* Use -every to run them all."
return []
else:
return servers
def start_command(db=None, uuid="", host="", name="", sudo=False, every=False):
"""
Does a simple start of the given server(s) identified by the uuid, host
(default_host) parameter or the name.:
m2sh start -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh start -db config.sqlite -host localhost
m2sh start -db config.sqlite -name test
m2sh start -db config.sqlite -every
Give the -sudo options if you want it to start mongrel2 as root for you
(must have sudo installed).
Give the -every option if you want mongrel2 to launch all servers listed in
the given db.
If multiple servers match and -every is not given, m2sh will ask you which
to start.
"""
root_enabler = 'sudo' if sudo else ''
servers = find_servers(db, uuid, host, name, every)
if not servers or servers.count() == 0:
print 'No matching servers found, nothing launched'
else:
for server in servers:
print 'Launching server %s %s on port %d' % (server.name, server.uuid, server.port)
os.system('%s mongrel2 %s %s' % (root_enabler, db, server.uuid))
def stop_command(db=None, uuid="", host="", name="", every=False, murder=False):
"""
Stops a running mongrel2 process according to the host, either
gracefully (INT) or murderous (TERM):
m2sh stop -db config.sqlite -host localhost
m2sh stop -db config.sqlite -host localhost -murder
m2sh stop -db config.sqlite -name test -murder
m2sh stop -db config.sqlite -every
You shouldn't need sudo to stop a running mongrel if you
are also the user that owns the chroot directory or root.
Normally mongrel2 will wait until connections die off before really
leaving, but you can give it the -murder flag and it'll nuke it
semi-gracefully. You can also do it again with -murder if it's waiting
for some dead connections and you want it to just quit.
"""
for server in find_servers(db, uuid, host, name, every):
pid = get_server_pid(server)
if pid:
sig = signal.SIGTERM if murder else signal.SIGINT
os.kill(pid, sig)
def reload_command(db=None, uuid="", host="", name="", every=False):
"""
Causes Mongrel2 to do a soft-reload which will re-read the config
database and then attempt to load a whole new configuration without
losing connections on the previous one:
m2sh reload -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh reload -db config.sqlite -host localhost
m2sh reload -db config.sqlite -name test
m2sh reload -db config.sqlite -every
This reload will need access to the config database from within the
chroot for it to work, and it's not totally guaranteed to be 100%
reliable, but if you are doing development and need to do quick changes
then this is what you do.
"""
for server in find_servers(db, uuid, host, name, every):
pid = get_server_pid(server)
if pid:
os.kill(pid, signal.SIGHUP)
def running_command(db=None, uuid="", host="", name="", every=False):
"""
Tells you if the given server is still running:
m2sh running -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh running -db config.sqlite -host localhost
m2sh running -db config.sqlite -name test
m2sh running -db config.sqlite -every
"""
for server in find_servers(db, uuid, host, name, every):
pid = get_server_pid(server)
# TODO: Clean this up.
if pid:
try:
os.kill(pid, 0)
print "Found server %s %s RUNNING at PID %i" % (server.name,
server.uuid,
pid)
except OSError:
print "Server %s %s NOT RUNNING at PID %i" % (server.name,
server.uuid,
pid)
def control_command(db=None, host="", name="", uuid=""):
"""
Start a simple control console for working with mongrel2.
This is *very* bare bones at the moment but should improve.
m2sh control -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh control -db config.sqlite -host localhost
m2sh control -db config.sqlite -name test
"""
store = model.load_db("sqlite:" + db)
import zmq
servers = find_servers(db, uuid, host, name, False)
if servers:
server = servers[0]
CTX = zmq.Context()
results = store.find(model.Setting, model.Setting.key == unicode("control_port"))
        addr = results[0].value if results.count() > 0 else "ipc://run/control"
ctl = CTX.socket(zmq.REQ)
print "CONNECTING to: %s in %s" % (addr, server.chroot)
os.chdir(server.chroot)
ctl.connect(addr)
try:
while True:
cmd = raw_input("> ")
ctl.send(cmd)
print ctl.recv()
except EOFError:
ctl.close()
def get_server_pid(server):
pid_file = os.path.realpath(server.chroot + server.pid_file)
if not os.path.isfile(pid_file):
print "PID file %s not found for server %s %s" % (pid_file,
server.name,
server.uuid)
return None
else:
return int(open(pid_file, 'r').read())
def version_command():
"""
    Prints out the version of your mongrel2 binary.
"""
print "Mongrel2/1.7.5"
|
smalls257/VRvisu
|
refs/heads/master
|
Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/test/test_difflib.py
|
87
|
import difflib
from test.test_support import run_unittest, findfile
import unittest
import doctest
import sys
class TestWithAscii(unittest.TestCase):
def test_one_insert(self):
sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('insert', 0, 0, 0, 1),
('equal', 0, 100, 1, 101)])
sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 50, 0, 50),
('insert', 50, 50, 50, 51),
('equal', 50, 100, 51, 101)])
def test_one_delete(self):
sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40)
self.assertAlmostEqual(sm.ratio(), 0.994, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 40, 0, 40),
('delete', 40, 41, 40, 40),
('equal', 41, 81, 40, 80)])
class TestAutojunk(unittest.TestCase):
"""Tests for the autojunk parameter added in 2.7"""
def test_one_insert_homogenous_sequence(self):
# By default autojunk=True and the heuristic kicks in for a sequence
# of length 200+
seq1 = 'b' * 200
seq2 = 'a' + 'b' * 200
sm = difflib.SequenceMatcher(None, seq1, seq2)
self.assertAlmostEqual(sm.ratio(), 0, places=3)
# Now turn the heuristic off
sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False)
self.assertAlmostEqual(sm.ratio(), 0.9975, places=3)
class TestSFbugs(unittest.TestCase):
def test_ratio_for_null_seqn(self):
# Check clearing of SF bug 763023
s = difflib.SequenceMatcher(None, [], [])
self.assertEqual(s.ratio(), 1)
self.assertEqual(s.quick_ratio(), 1)
self.assertEqual(s.real_quick_ratio(), 1)
def test_comparing_empty_lists(self):
# Check fix for bug #979794
group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
self.assertRaises(StopIteration, group_gen.next)
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, diff_gen.next)
def test_added_tab_hint(self):
# Check fix for bug #1488943
diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
self.assertEqual("- \tI am a buggy", diff[0])
self.assertEqual("? --\n", diff[1])
self.assertEqual("+ \t\tI am a bug", diff[2])
self.assertEqual("? +\n", diff[3])
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
def test_html_diff(self):
# Check SF patch 914575 for generating HTML differences
f1a = ((patch914575_from1 + '123\n'*10)*3)
t1a = (patch914575_to1 + '123\n'*10)*3
f1b = '456\n'*10 + f1a
t1b = '456\n'*10 + t1a
f1a = f1a.splitlines()
t1a = t1a.splitlines()
f1b = f1b.splitlines()
t1b = t1b.splitlines()
f2 = patch914575_from2.splitlines()
t2 = patch914575_to2.splitlines()
f3 = patch914575_from3
t3 = patch914575_to3
i = difflib.HtmlDiff()
j = difflib.HtmlDiff(tabsize=2)
k = difflib.HtmlDiff(wrapcolumn=14)
full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
tables = '\n'.join(
[
'<h2>Context (first diff within numlines=5(default))</h2>',
i.make_table(f1a,t1a,'from','to',context=True),
'<h2>Context (first diff after numlines=5(default))</h2>',
i.make_table(f1b,t1b,'from','to',context=True),
'<h2>Context (numlines=6)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
'<h2>Context (numlines=0)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
'<h2>Same Context</h2>',
i.make_table(f1a,f1a,'from','to',context=True),
'<h2>Same Full</h2>',
i.make_table(f1a,f1a,'from','to',context=False),
'<h2>Empty Context</h2>',
i.make_table([],[],'from','to',context=True),
'<h2>Empty Full</h2>',
i.make_table([],[],'from','to',context=False),
'<h2>tabsize=2</h2>',
j.make_table(f2,t2),
'<h2>tabsize=default</h2>',
i.make_table(f2,t2),
'<h2>Context (wrapcolumn=14,numlines=0)</h2>',
k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
'<h2>wrapcolumn=14,splitlines()</h2>',
k.make_table(f3.splitlines(),t3.splitlines()),
'<h2>wrapcolumn=14,splitlines(True)</h2>',
k.make_table(f3.splitlines(True),t3.splitlines(True)),
])
actual = full.replace('</body>','\n%s\n</body>' % tables)
# temporarily uncomment next two lines to baseline this test
#with open('test_difflib_expect.html','w') as fp:
# fp.write(actual)
with open(findfile('test_difflib_expect.html')) as fp:
self.assertEqual(actual, fp.read())
def test_recursion_limit(self):
# Check if the problem described in patch #1413711 exists.
limit = sys.getrecursionlimit()
old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
difflib.SequenceMatcher(None, old, new).get_opcodes()
class TestOutputFormat(unittest.TestCase):
def test_tab_delimiter(self):
args = ['one', 'two', 'Original', 'Current',
'2005-01-26 23:30:50', '2010-04-02 10:20:52']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], [
"--- Original\t2005-01-26 23:30:50",
"+++ Current\t2010-04-02 10:20:52"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], [
"*** Original\t2005-01-26 23:30:50",
"--- Current\t2010-04-02 10:20:52"])
def test_no_trailing_tab_on_empty_filedate(self):
args = ['one', 'two', 'Original', 'Current']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"])
def test_range_format_unified(self):
# Per the diff spec at http://www.unix.org/single_unix_specification/
spec = '''\
Each <range> field shall be of the form:
%1d", <beginning line number> if the range contains exactly one line,
and:
"%1d,%1d", <beginning line number>, <number of lines> otherwise.
If a range is empty, its beginning line number shall be the number of
the line just before the range, or 0 if the empty range starts the file.
'''
fmt = difflib._format_range_unified
self.assertEqual(fmt(3,3), '3,0')
self.assertEqual(fmt(3,4), '4')
self.assertEqual(fmt(3,5), '4,2')
self.assertEqual(fmt(3,6), '4,3')
self.assertEqual(fmt(0,0), '0,0')
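        # For illustration (not part of the original test): fmt(3, 5) == '4,2' is
        # the range that appears in a unified hunk header such as "@@ -4,2 +4,2 @@".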
def test_range_format_context(self):
# Per the diff spec at http://www.unix.org/single_unix_specification/
spec = '''\
The range of lines in file1 shall be written in the following format
if the range contains two or more lines:
"*** %d,%d ****\n", <beginning line number>, <ending line number>
and the following format otherwise:
"*** %d ****\n", <ending line number>
The ending line number of an empty range shall be the number of the preceding line,
or 0 if the range is at the start of the file.
Next, the range of lines in file2 shall be written in the following format
if the range contains two or more lines:
"--- %d,%d ----\n", <beginning line number>, <ending line number>
and the following format otherwise:
"--- %d ----\n", <ending line number>
'''
fmt = difflib._format_range_context
self.assertEqual(fmt(3,3), '3')
self.assertEqual(fmt(3,4), '4')
self.assertEqual(fmt(3,5), '4,5')
self.assertEqual(fmt(3,6), '4,6')
self.assertEqual(fmt(0,0), '0')
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(
TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs,
TestOutputFormat, Doctests)
if __name__ == '__main__':
test_main()
|
globocom/database-as-a-service
|
refs/heads/master
|
dbaas/physical/migrations/0100_auto__del_field_pool_endpoint__add_field_pool_rancher_endpoint__add_fi.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Pool.endpoint'
db.delete_column(u'physical_pool', 'endpoint')
# Adding field 'Pool.rancher_endpoint'
db.add_column(u'physical_pool', 'rancher_endpoint',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding field 'Pool.cluster_endpoint'
db.add_column(u'physical_pool', 'cluster_endpoint',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Pool.endpoint'
db.add_column(u'physical_pool', 'endpoint',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Deleting field 'Pool.rancher_endpoint'
db.delete_column(u'physical_pool', 'rancher_endpoint')
# Deleting field 'Pool.cluster_endpoint'
db.delete_column(u'physical_pool', 'cluster_endpoint')
models = {
u'account.organization': {
'Meta': {'object_name': 'Organization'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'maintenance_day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'maintenance_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginepatch': {
'Meta': {'object_name': 'EnginePatch'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
'required_disk_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'root_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ssl_expire_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.pool': {
'Meta': {'object_name': 'Pool'},
'cluster_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'cluster_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'pools'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'rancher_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['account.Team']", 'symmetrical': 'False'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recreate_slave': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
EightMedia/djangopypi
|
refs/heads/master
|
djangopypi/admin.py
|
3
|
from django.conf import settings
from django.contrib import admin
from djangopypi.models import *
admin.site.register(Package)
admin.site.register(Release)
admin.site.register(Classifier)
admin.site.register(Distribution)
admin.site.register(Review)
if getattr(settings,'DJANGOPYPI_MIRRORING', False):
admin.site.register(MasterIndex)
admin.site.register(MirrorLog)
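# Note on the conditional registration above: MasterIndex and MirrorLog only
# show up in the admin when mirroring support is enabled. A minimal sketch of
# the relevant settings entry (assuming a standard Django settings module):
#
#     DJANGOPYPI_MIRRORING = True
#
# With the flag absent or False, only Package, Release, Classifier,
# Distribution and Review are registered.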
|
sahiljain/catapult
|
refs/heads/master
|
third_party/py_vulcanize/third_party/rjsmin/_setup/py2/dist.py
|
43
|
# -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
================
dist utilities
================
dist utilities.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import sys as _sys
from _setup import shell as _shell
def run_setup(*args, **kwargs):
""" Run setup """
if 'setup' in kwargs:
script = kwargs.get('setup') or 'setup.py'
del kwargs['setup']
else:
script = 'setup.py'
if 'fakeroot' in kwargs:
fakeroot = kwargs['fakeroot']
del kwargs['fakeroot']
else:
fakeroot = None
if kwargs:
raise TypeError("Unrecognized keyword parameters")
script = _shell.native(script)
argv = [_sys.executable, script] + list(args)
if fakeroot:
argv.insert(0, fakeroot)
return not _shell.spawn(*argv)
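# Usage sketch (hypothetical arguments, not part of the original module):
# run "python setup.py build --quiet" through the helper and check the
# boolean it returns (True when the spawned process exits with status 0).
#
#     if not run_setup('build', '--quiet', setup='setup.py'):
#         raise RuntimeError('setup.py build failed')
#
# Passing fakeroot='fakeroot' would prefix the spawned command with that
# wrapper binary, which is handy for Debian-style packaging builds.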
|
zorojean/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_base.py
|
216
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
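# A minimal sketch (not part of the original test module) of the nested
# parameter convention exercised by test_get_params/test_set_params:
# "outer__inner" names reach through to sub-estimators.
#
#     demo = Pipeline([("svc", SVC())])
#     demo.set_params(svc__C=10.0)                  # sets C on the inner SVC
#     demo.get_params(deep=True)["svc__C"]          # -> 10.0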
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
wubr2000/googleads-python-lib
|
refs/heads/master
|
examples/dfp/v201505/user_service/get_all_roles.py
|
4
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all roles.
This sample can be used to determine which role id is needed when getting and
creating users."""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201505')
# Get all roles.
roles = user_service.getAllRoles()
# Display results.
for role in roles:
print ('Role with id \'%s\' and name \'%s\' was found.'
% (role['id'], role['name']))
print '\nNumber of results found: %s' % len(roles)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
bellowsj/aiopogo
|
refs/heads/master
|
aiopogo/pogoprotos/data/avatar/avatar_item_pb2.py
|
1
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/avatar/avatar_item.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/avatar/avatar_item.proto',
package='pogoprotos.data.avatar',
syntax='proto3',
serialized_pb=_b('\n(pogoprotos/data/avatar/avatar_item.proto\x12\x16pogoprotos.data.avatar\"R\n\nAvatarItem\x12\x1a\n\x12\x61vatar_template_id\x18\x01 \x01(\t\x12\x18\n\x10new_timestamp_ms\x18\x02 \x01(\x03\x12\x0e\n\x06viewed\x18\x03 \x01(\x08\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_AVATARITEM = _descriptor.Descriptor(
name='AvatarItem',
full_name='pogoprotos.data.avatar.AvatarItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='avatar_template_id', full_name='pogoprotos.data.avatar.AvatarItem.avatar_template_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_timestamp_ms', full_name='pogoprotos.data.avatar.AvatarItem.new_timestamp_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='viewed', full_name='pogoprotos.data.avatar.AvatarItem.viewed', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=150,
)
DESCRIPTOR.message_types_by_name['AvatarItem'] = _AVATARITEM
AvatarItem = _reflection.GeneratedProtocolMessageType('AvatarItem', (_message.Message,), dict(
DESCRIPTOR = _AVATARITEM,
__module__ = 'pogoprotos.data.avatar.avatar_item_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.avatar.AvatarItem)
))
_sym_db.RegisterMessage(AvatarItem)
# @@protoc_insertion_point(module_scope)
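# Usage sketch (illustrative values, not part of the generated file): the
# generated class behaves like any other proto3 message.
#
#     item = AvatarItem(avatar_template_id='shirt_basic', viewed=True)
#     data = item.SerializeToString()
#     restored = AvatarItem.FromString(data)
#     assert restored.avatar_template_id == 'shirt_basic'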
|
Chuban/moose
|
refs/heads/devel
|
python/TestHarness/XMLDiffer.py
|
4
|
import os, traceback
import xml.etree.ElementTree as xml
##
# Stores error information needed for printing diff messages
class XMLError(object):
##
# Constructor.
# @param err A one line error message
# @param msg A detailed message describing the error
def __init__(self, err, msg):
self.error = err
self.message = msg
##
# A class for finding difference between XML documents
class XMLDiffer(object):
##
# Constructor.
# @param file1 The master (gold) file to check against
# @param file2 The file to compare to the master file
#
# Optional Arguments:
# abs_zero: Any value less than this is assumed zero (default: 1e-11)
# rel_tol: Relative tolerance to check numeric values against (default: 5.5e-6)
# max_values: The maximum number of values to test
def __init__(self, file1, file2, **kwargs):
# Store the file names
self._file = [file1, file2]
# Extract the optional arguments
self._abs_zero = float(kwargs.pop('abs_zero', 1e-11))
self._rtol = float(kwargs.pop('rel_tol', 5.5e-6))
self._ignored_attributes = kwargs.pop('ignored_attributes', [])
# Storage for XMLError objects
self._errors = []
# Extract the XML tree from the files
self._root1 = self._extractXML(file1)
self._root2 = self._extractXML(file2)
# Perform the comparison
self._compare()
##
# Check the comparison status (public)
# Returns True if the comparison fails
def fail(self):
return len(self._errors) > 0
##
# Print the error message(s) (public)
# @return The output as a single string
def message(self, **kwargs):
# Header
output = []
output.append('Running XMLDiffer.py')
output.append( ' File 1: ' + self._file[0])
output.append( ' File 2: ' + self._file[1])
output.append( ' rel_tol: ' + str(self._rtol))
output.append( ' abs_zero: ' + str(self._abs_zero))
output.append( ' No. of errors: ' + str(len(self._errors)))
# Errors
cnt = 0
for e in self._errors:
cnt += 1
output.append('\n')
output.append('ERROR ' + str(cnt) + ':')
output.append(' ' + e.error)
for m in e.message:
output.append(' ' + m)
# Print the output
if kwargs.pop('output', False):
print '\n'.join(output)
# Return the text, as a single string
return '\n'.join(output)
##
# Add an XMLError object to the storage vector (private)
# @param err A string containing the error message or an XMLError object
# @param msg A detailed message for the error (ignored if XMLError is passed to err)
def _addError(self, err, msg=[]):
# Add object directly
if isinstance(err, XMLError):
self._errors.append(err)
# Create and add the object
else:
obj = XMLError(err, msg)
self._errors.append(obj)
##
# Reads the XML file (private)
# @param filename The name of the XML file to read
# @return An xml.etree.ElementTree root object
def _extractXML(self, filename):
# Check for file existence
if not os.path.isfile(filename):
self._addError('Could not open ' + filename + ', the file does not exist.')
return None
# Get the root object of the XML tree
try:
root = xml.parse(filename).getroot()
# Catch parser errors
except xml.ParseError:
err = 'An XML parser error occurred attempting to read XML tree from ' + filename + '.'
msg = traceback.format_exc().splitlines()
self._addError(err, msg)
root = None
# Return the object
return root
##
# Perform the block by block comparison (private)
def _compare(self):
# Define local variables
root = [self._root1, self._root2]
# Do nothing if the objects do not exist
if root[0] == None or root[1] == None:
return
# Loop through each tree object in the master file
for elem0 in root[0].getiterator():
# Initialize the result and error storage
results = []
errors = []
# Loop through all blocks in the second file with the current tag
for elem1 in root[1].getiterator(elem0.tag):
# Perform the comparison
r, e = self._compareBlock(elem0, elem1)
# Append the test results
results.append(r)
errors.append(e)
# If all results are False, there was no match
if not any(results):
# Filter out errors (elem.text failure)
errors = filter(None, errors)
                # If no errors exist, no block with this tag, or none with identical attributes, was located
if len(errors) == 0:
msg = self._getAttrib(elem0)
if len(msg) == 0:
err = 'Unable to locate an XML Block with the tag "' + elem0.tag + '" in file 2.'
self._addError(err)
else:
err = 'Unable to locate an XML Block with the tag "' + elem0.tag + '" and the following attributes in file 2.'
self._addError(err, msg)
# Had a text error within similar blocks
else:
for e in errors:
self._addError(e)
##
# Compares XML blocks (private)
# This function first compares the XML block attributes, if those match
# then the XML text is also compared.
# @param elem0 The master XML element object
# @param elem1 The XML element object to compare the master against
# @return A pair containing the test result (True or False) and an error indicator,
# this 'indicator' is None if the result of the match is True or if the
# attributes fail to match. When the text fails to match then it contains
# the XMLError object.
def _compareBlock(self, elem0, elem1):
# Perform attribute comparison in both directions: ensure that
# every attribute in the gold file is in the output file, and
# vice-versa.
test_attrib = self._compareAttributes(elem0, elem1) and self._compareAttributes(elem1, elem0)
# If the attributes match, compare the text and return those results
if test_attrib:
test_text, err = self._compareText(elem0, elem1)
return test_text, err
        # Otherwise the attributes do not match
else:
return False, None
##
# Perform attribute comparison (private)
# @param elem0 The master XML element object
# @param elem1 The XML element object to compare the master against
def _compareAttributes(self, elem0, elem1):
# Initialize the output (assume match)
result = True
# Loop through each attribute of the master object
for key0, value0 in elem0.attrib.iteritems():
# If this key is one of the attributes we're ignoring, then ignore it!
if key0 in self._ignored_attributes:
continue
# Attribute is missing from the slave object, match fails
if not elem1.attrib.has_key(key0):
return False
# If the slave object has the same attribute, perform a comparison
elif elem1.attrib.has_key(key0):
value1 = elem1.attrib[key0]
# Attempt to perform a numeric comparison
try:
tvalue, rel_diff = self._isClose(value0, value1)
if not tvalue:
return False
except:
if value0 != value1:
return False
# Return the results
return result
## Perform comparison of text for two XML blocks (private)
# @param elem0 The master XML element object
# @param elem1 The XML element object to compare the master against
# @return A pair of items, either True, None or False, XMLError
def _compareText(self, elem0, elem1):
# Initialize the output
result = True
err = None
# Return if no text exists
if elem0.text == None and elem1.text == None:
return (result, err)
elif elem0.text == None or elem1.text == None:
return (False, err)
# Convert the text to a list of strings
text0 = elem0.text.replace('\n', '').strip().split(' ')
text1 = elem1.text.replace('\n', '').strip().split(' ')
text0 = filter(None, text0)
text1 = filter(None, text1)
# Check that the lengths are the same
if len(text0) != len(text1):
result = False
err = 'An XML block with the tag "' + elem0.tag + '" and the following attributes exists in both files, but the blocks have a different number of values.'
msg = self._getAttrib(elem0)
msg.append('No. items file 1: ' + '%d' % len(text0))
msg.append('No. items file 2: ' + '%d' % len(text1))
err = XMLError(err, msg)
return (False, err)
for i in xrange(len(text0)):
value, rel_diff = self._isClose(text0[i], text1[i])
if not value:
err = 'An XML block with the tag "' + elem0.tag + '" and the following attributes has differing values on file 2.'
msg = self._getAttrib(elem0)
msg.append('Index ' + str(i) + ' : ' + text0[i] + ' ~ ' + text1[i] + ', rel diff: ' + '%e' % rel_diff)
err = XMLError(err, msg)
return (False, err)
return result, err
##
# Perform relative tolerance check between two numbers (private)
# @param value0 A string or list of strings containing the first number
# @param value1 A string or list of strings containing the second number
def _isClose(self, value0, value1):
# Return values
result = True
rel_diff = 0
# Convert the strings to floats
value0 = float(value0)
value1 = float(value1)
# Apply the absolute zeros
if abs(value0) < self._abs_zero:
value0 = 0
if abs(value1) < self._abs_zero:
value1 = 0
# Check for zero
if value0 == 0 and value1 == 0:
result = True
# Check the relative error
else:
rel_diff = abs( ( value0 - value1 ) / max( abs(value0), abs(value1) ) )
if rel_diff > self._rtol:
result = False
# Return the comparison
return result, rel_diff
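    # Worked example of the tolerance logic above (with the defaults
    # abs_zero=1e-11 and rel_tol=5.5e-6): comparing 1.0 with 1.000001 gives a
    # relative difference of about 1e-6, which passes; 1.0 with 1.00001 gives
    # about 1e-5, which fails; 5e-12 compared with 0.0 passes because both
    # values are clamped to zero by abs_zero before the relative check.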
##
# Get the attributes (dict) as a string (private)
# @return Attribute message string
def _getAttrib(self, elem):
msg = []
for k, v in elem.attrib.iteritems():
msg.append(' ' + k + ' = ' + v)
return msg
if __name__ == '__main__':
# You can run XMLDiffer.py as a stand-alone by putting two XML files
# in the variable names file1 and file2 below, and then running:
#
# python $MOOSE_DIR/python/TestHarness/XMLDiffer.py
file1 = os.path.join(os.getenv('MOOSE_DIR'), 'test', 'tests', 'outputs', 'vtk', 'vtk_diff_serial_mesh_parallel_out_005.pvtu')
file2 = os.path.join(os.getenv('MOOSE_DIR'), 'test', 'tests', 'outputs', 'vtk', 'gold', 'vtk_diff_serial_mesh_parallel_out_005.pvtu')
d = XMLDiffer(file1, file2, ignored_attributes=['header_type'])
if not d.fail():
print 'Files are the same\n'
else:
print d.message()
|
anushreejangid/csmpe-main
|
refs/heads/master
|
csmpe/core_plugins/csm_install_operations/ios_xe/activate.py
|
1
|
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# from package_lib import SoftwarePackage
import re
from time import time
from datetime import datetime
from csmpe.plugins import CSMPlugin
from install import expand_subpkgs
from install import install_activate_reload
from install import install_activate_write_memory
from install import install_activate_issu
from csmpe.core_plugins.csm_get_inventory.ios_xe.plugin import get_package, get_inventory
from csmpe.core_plugins.csm_install_operations.utils import update_device_info_udi
from utils import remove_exist_image
from utils import xe_show_platform
from utils import install_add_remove
from utils import installed_package_name
class Plugin(CSMPlugin):
"""This plugin Activates packages on the device."""
name = "Install Activate Plugin"
platforms = {'ASR900'}
phases = {'Activate'}
os = {'XE'}
def run(self):
"""
Performs install activate operation
"""
rsp_count = int(self.ctx.load_data('xe_rsp_count')[0])
pkg = self.ctx.load_data('xe_activate_pkg')[0]
mode = self.ctx.load_data('xe_boot_mode')[0]
folder = self.ctx.load_data('xe_install_folder')[0]
self.ctx.info("Activate number of RSP = {}".format(rsp_count))
self.ctx.info("Activate package = {}".format(pkg))
self.ctx.info("Activate package mode = {}".format(mode))
self.ctx.info("Install folder = {}".format(folder))
self.ctx.info("Activate package(s) pending")
self.ctx.post_status("Activate Package(s) Pending")
prompt = self.ctx._connection.hostname
# issu: need to copy the consolidated image to the installed folder
if mode == 'issu':
cmd = 'copy bootflash:' + pkg + ' ' + folder + '/' + pkg
install_add_remove(self.ctx, cmd)
# package = 'bootflash:' + pkg
# remove_exist_image(self.ctx, package)
# subpackage: need to expand the consolidated image to the installed folder
if mode == 'subpackage':
result = expand_subpkgs(self.ctx, rsp_count, folder, pkg)
if not result:
self.ctx.error("Error in extracting sub-images from the consolidated "
"image {}".format(pkg))
return
# configurations
cmd = "configure terminal"
self.ctx.send(cmd, wait_for_string=prompt)
cmd = "config-register 0x2102"
self.ctx.send(cmd, wait_for_string=prompt)
if mode == 'issu':
cmd = 'redundancy'
self.ctx.send(cmd, wait_for_string=prompt)
cmd = 'mode sso'
self.ctx.send(cmd, wait_for_string=prompt)
else:
cmd = "no boot system"
self.ctx.send(cmd, wait_for_string=prompt)
if mode == 'consolidated':
cmd = "boot system bootflash:" + pkg
else:
cmd = 'boot system ' + folder + '/packages.conf'
self.ctx.send(cmd, wait_for_string=prompt)
self.ctx.send('end', wait_for_string=prompt)
cmd = "write memory"
install_activate_write_memory(self.ctx, cmd, self.ctx._connection.hostname)
# self.ctx.send(cmd, timeout=300, wait_for_string=prompt)
# Start activation
if mode == 'issu':
cmd = 'request platform software package install node file ' + \
folder + '/' + pkg + ' interface-module-delay 160'
install_activate_issu(self.ctx, cmd)
else:
install_activate_reload(self.ctx)
self.ctx.info("Activate package done")
# Refresh package and inventory information
get_package(self.ctx)
get_inventory(self.ctx)
update_device_info_udi(self.ctx)
# Verify the version
activate_success = True
if self.ctx._connection.os_version not in pkg:
activate_success = False
self.ctx.warning('The post-activate OS Version: {} while Activate package = '
'{}'.format(self.ctx._connection.os_version, pkg))
# Verify the Image type
if mode == 'issu' or mode == 'subpackage':
pkg_conf = folder + '/packages.conf'
image_type = installed_package_name(self.ctx, pkg_conf)
if not image_type:
activate_success = False
self.ctx.warning('{} does not exist.'.format(pkg_conf))
if image_type not in pkg:
activate_success = False
self.ctx.warning('The post-activate image type: {} while Activate package = '
'{}'.format(image_type, pkg))
else:
# mode is consolidated
output = self.ctx.send('show version | include ^System image')
if output:
m = re.search('"bootflash:(.*)"$', output)
if m:
image_type = m.group(1)
if image_type not in pkg:
self.ctx.warning('The post-activate image type: {} while Activate '
'package = {}'.format(image_type, pkg))
else:
activate_success = False
self.ctx.info('show version = {}'.format(output))
self.ctx.warning('System image not found in show version: {}'.format(output))
else:
activate_success = False
self.ctx.info('show version = {}'.format(output))
self.ctx.warning('System image not found in show version: {}'.format(output))
if not activate_success:
# Refresh package information
get_package(self.ctx)
update_device_info_udi(self.ctx)
            self.ctx.warning('Activate image {} may have failed. Please see above warnings'.format(pkg))
if mode == 'issu':
# Remove all-in-one image from the installed folder
package = folder + '/' + pkg
remove_exist_image(self.ctx, package)
if rsp_count == 2:
package = 'stby-' + package
remove_exist_image(self.ctx, package)
# Verify the status of RP and SIP
previous_data, timestamp = self.ctx.load_data('xe_show_platform')
self.ctx.info("Pre-Activate data collected on {}".format(
datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')))
if timestamp < time() - (60 * 60 * 2): # two hours
self.ctx.warning("Pre-Activate phase 'show platform' "
"data older than 2 hours")
current_data = xe_show_platform(self.ctx)
if not current_data:
self.ctx.warning("The CLI 'show platform' is not able to determine the status of RP and SIP ")
return
for Slot, Status in previous_data.items():
Type = Status[0]
previous_state = Status[1]
current_state = current_data[Slot][1]
if previous_state != current_state:
if previous_state == 'ok, active' and current_state == 'ok, standby' \
or previous_state == 'ok, standby' and current_state == 'ok, active':
continue
self.ctx.warning("Slot {} Type {} state changes after Activation".format(Slot, Type))
self.ctx.warning("\t Pre-Activate State = {} vs. Post-Activate State = "
"{}".format(previous_state, current_state))
if 'ok' not in current_state:
self.ctx.warning("Slot {} Type {} is not in 'ok' state after "
"activation".format(Slot, Type))
self.ctx.info("The status of RP and SIP has been verified. Please check any warnings in plugins.log")
|
abogeorge/simpleTicket
|
refs/heads/master
|
simpleTicket/siteEngine/urls.py
|
1
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = [
# Index Page
url(r'^$', views.index, name='index'),
# User auth Page
url(r'^login/$', views.login, name='login'),
url(r'^login_auth/$', views.login_auth, name="login_auth"),
url(r'^logout/$', views.logout, name='logout'),
# Account Information
url(r'^myaccount/$', views.myaccount, name='myaccount'),
# Contact Page
url(r'^contact/$', views.contact, name='contact'),
# Services Page
url(r'^services/$', views.services, name='services'),
# Create Ticket
url(r'^ticketcreate/$', views.create_ticket, name='create_ticket'),
# View Active Tickets
url(r'^ticketsactive/$', views.active_tickets, name='active_tickets'),
# View Closed Tickets
url(r'^ticketsclosed/$', views.closed_tickets, name='closed_tickets'),
# Create Order
url(r'^ordercreate/$', views.create_order, name='create_order'),
# View Active Orders
url(r'^ordersactive/$', views.active_orders, name='active_orders'),
# View Closed Orders
url(r'^ordersclosed/$', views.closed_orders, name='closed_orders'),
# View All Subalterns
url(r'^subalterns/$', views.subalterns, name='subalterns'),
# Approve subalterns open tickets
url(r'^subalterns_tickets/$', views.subalterns_tickets, name='subalterns_tickets'),
# Change subalterns open tickets status
url(r'^subalterns_ticket_cs/$', views.subalterns_ticket_cs, name='subalterns_ticket_cs'),
# Approve subalterns placed orders
url(r'^subalterns_orders/$', views.subalterns_orders, name='subalterns_orders'),
# Change subalterns open orders status
url(r'^subalterns_order_cs/$', views.subalterns_order_cs, name='subalterns_order_cs'),
]
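# A minimal sketch (assumed, not taken from the project) of the kind of view
# each url() entry above maps to; the real implementations live in
# siteEngine/views.py.
#
#     from django.shortcuts import render
#
#     def index(request):
#         return render(request, 'siteEngine/index.html')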
|
Austin503/pyglet
|
refs/heads/master
|
examples/joystick.py
|
29
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
from pyglet.gl import *
joysticks = pyglet.input.get_joysticks()
assert joysticks, 'No joystick device is connected'
joystick = joysticks[0]
joystick.open()
window = pyglet.window.Window()
@window.event
def on_draw():
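    # Joystick axes are reported in [-1, 1]; the 0.8 factor keeps the marker
    # slightly inside the window edges, and the (value + 1) * size / 2 step
    # maps that range onto pixel coordinates. The y axis is negated to flip
    # its on-screen direction relative to the raw axis value.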
x = (0.8*joystick.x + 1) * window.width / 2
y = (-0.8*joystick.y + 1) * window.height / 2
z = joystick.z
angle = joystick.rz * 180
# Axes
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1, 0, 0)
glLoadIdentity()
glTranslatef(x, y, 0)
glScalef(1 + z, 1 + z, 1 + z)
glRotatef(-angle, 0, 0, 1)
glBegin(GL_TRIANGLES)
glVertex2f(-10, 0)
glVertex2f(0, 13)
glVertex2f(10, 0)
glEnd()
# Buttons
glLoadIdentity()
x = 10
y = 10
glPointSize(5)
glBegin(GL_POINTS)
for button in joystick.buttons:
if button:
glVertex2f(x, y)
x += 20
glEnd()
# Hat
glColor3f(0, 0, 1)
x = window.width / 2
y = window.height / 2
glBegin(GL_POINTS)
glVertex2f(x + joystick.hat_x * 50, y + joystick.hat_y * 50)
glEnd()
pyglet.clock.schedule(lambda dt: None)
pyglet.app.run()
|
dhorelik/django-cms
|
refs/heads/develop
|
cms/south_migrations/0019_public_table_renames.py
|
1680
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
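# The three labels above let the frozen model definitions below refer to
# whichever user model is active: the stock auth.User on older Django
# versions, or the custom model named by AUTH_USER_MODEL on Django 1.5+.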
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
jmerkow/VTK
|
refs/heads/master
|
Examples/GUI/Python/ImageTracerWidget.py
|
22
|
#!/usr/bin/env python
# initial translation from the tcl by VTK/Utilities/tcl2py.py
# further cleanup and fixes to the translation by Charl P. Botha
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This example demonstrates how to use the vtkImageTracerWidget
# to trace on a slice of a 3D image dataset on one of its orthogonal planes.
# The button actions and key modifiers are as follows for controlling the
# widget:
# 1) left button click over the image, hold and drag draws a free hand line.
# 2) left button click and release erases the widget line, if it exists, and
# repositions the handle.
# 3) middle button click starts a snap line. The snap line can be
# terminated by clicking the middle button while depressing the ctrl key.
# 4) when tracing or snap drawing a line, if the last cursor position is
# within specified tolerance to the first handle, the widget line will form
# a closed loop with only one handle.
# 5) right button clicking and holding on any handle that is part of a snap
# line allows handle dragging. Any existing line segments are updated
# accordingly.
# 6) ctrl key + right button down on any handle will erase it. Any existing
# snap line segments are updated accordingly. If the line was formed by
# continuous tracing, the line is deleted leaving one handle.
# 7) shift key + right button down on any snap line segment will insert a
# handle at the cursor position. The snap line segment is split accordingly.
#
#
def AdjustSpline(evt, obj):
itw.GetPath(poly)
npts = itw.GetNumberOfHandles()
if npts < 2:
imageActor2.GetMapper().SetInputConnection(extract.GetOutputPort())
return
closed = itw.IsClosed()
if closed:
isw.ClosedOn()
else:
isw.ClosedOff()
imageActor2.GetMapper().SetInputConnection(extract.GetOutputPort())
isw.SetNumberOfHandles(npts)
for i in range(0, npts):
pt = poly.GetPoints().GetPoint(i)
isw.SetHandlePosition(i, pt[0], pt[1], pt[2])
if closed:
isw.GetPolyData(spoly)
imageActor2.GetMapper().SetInputConnection(stencil.GetOutputPort())
stencil.Update()
def AdjustTracer(evt, obj):
npts = isw.GetNumberOfHandles()
points.SetNumberOfPoints(npts)
for i in range(0, npts):
pt = isw.GetHandlePosition(i)
points.SetPoint(i, pt[0], pt[1], pt[2])
closed = isw.GetClosed()
if closed:
isw.GetPolyData(spoly)
imageActor2.GetMapper().SetInputConnection(stencil.GetOutputPort())
stencil.Update()
itw.InitializeHandles(points)
# Start by loading some data.
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetDataByteOrderToLittleEndian()
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
v16.SetFilePrefix("%s/Data/headsq/quarter" % (VTK_DATA_ROOT,))
v16.Update()
#
srange = v16.GetOutput().GetScalarRange()
min = srange[0]
max = srange[1]
diff = max-min
slope = 255.0/diff
inter = -slope*min
shift = inter/slope
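# The arithmetic above rescales the volume's scalar range [min, max] linearly
# onto 0..255 for unsigned-char display: slope = 255 / (max - min), and
# shift = inter / slope reduces to -min, so the shift/scale filter below
# computes (value - min) * slope.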
shifter = vtk.vtkImageShiftScale()
shifter.SetShift(shift)
shifter.SetScale(slope)
shifter.SetOutputScalarTypeToUnsignedChar()
shifter.SetInputConnection(v16.GetOutputPort())
shifter.ReleaseDataFlagOff()
shifter.Update()
# Display a y-z plane.
#
imageActor = vtk.vtkImageActor()
imageActor.GetMapper().SetInputConnection(shifter.GetOutputPort())
imageActor.VisibilityOn()
imageActor.SetDisplayExtent(31, 31, 0, 63, 0, 92)
imageActor.InterpolateOff()
#
spc = shifter.GetOutput().GetSpacing()
orig = shifter.GetOutput().GetOrigin()
x0 = orig[0]
xspc = spc[0]
pos = x0+xspc*31.0
# An alternative would be to derive the position from the actor bounds:
#     pos = imageActor.GetBounds()[0]
#
#
ren = vtk.vtkRenderer()
ren.SetBackground(0.4, 0.4, 0.5)
ren2 = vtk.vtkRenderer()
ren2.SetBackground(0.5, 0.4, 0.4)
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.AddRenderer(ren2)
renWin.SetSize(600, 300)
#
ren.SetViewport(0, 0, 0.5, 1)
ren2.SetViewport(0.5, 0, 1, 1)
#
interactor = vtk.vtkInteractorStyleImage()
#
iren = vtk.vtkRenderWindowInteractor()
iren.SetInteractorStyle(interactor)
iren.SetRenderWindow(renWin)
#
extract = vtk.vtkExtractVOI()
extract.SetVOI(31, 31, 0, 63, 0, 92)
extract.SetSampleRate(1, 1, 1)
extract.SetInputConnection(shifter.GetOutputPort())
extract.ReleaseDataFlagOff()
#
imageActor2 = vtk.vtkImageActor()
imageActor2.GetMapper().SetInputConnection(extract.GetOutputPort())
imageActor2.VisibilityOn()
imageActor2.SetDisplayExtent(31, 31, 0, 63, 0, 92)
imageActor2.InterpolateOff()
#
# Set up the image tracer widget
#
itw = vtk.vtkImageTracerWidget()
#
# Set the tolerance for capturing last handle when near first handle
# to form closed paths.
#
itw.SetCaptureRadius(1.5)
itw.GetGlyphSource().SetColor(1, 0, 0)
#
# Set the size of the glyph handle
#
itw.GetGlyphSource().SetScale(3.0)
#
# Set the initial rotation of the glyph if desired. The default glyph
# set internally by the widget is a '+' so rotating 45 deg. gives a 'x'
#
itw.GetGlyphSource().SetRotationAngle(45.0)
itw.GetGlyphSource().Modified()
itw.ProjectToPlaneOn()
itw.SetProjectionNormalToXAxes()
itw.SetProjectionPosition(pos)
itw.SetViewProp(imageActor)
itw.SetInputConnection(shifter.GetOutputPort())
itw.SetInteractor(iren)
itw.PlaceWidget()
#
# When the underlying vtkDataSet is a vtkImageData, the widget can be
# forced to snap to either nearest pixel points, or pixel centers. Here
# it is turned off.
#
itw.SnapToImageOff()
#
# Automatically form closed paths.
#
itw.AutoCloseOn()
#
# Set up a vtkSplineWidget in the second renderer and have
# its handles set by the tracer widget.
#
isw = vtk.vtkSplineWidget()
isw.SetCurrentRenderer(ren2)
isw.SetDefaultRenderer(ren2)
isw.SetInputConnection(extract.GetOutputPort())
isw.SetInteractor(iren)
bnds = imageActor2.GetBounds()
isw.PlaceWidget(bnds[0], bnds[1], bnds[2], bnds[3], bnds[4], bnds[5])
isw.ProjectToPlaneOn()
isw.SetProjectionNormalToXAxes()
isw.SetProjectionPosition(pos)
#
# Have the widgets control each others handle positions.
#
itw.AddObserver('EndInteractionEvent',AdjustSpline)
isw.AddObserver('EndInteractionEvent',AdjustTracer)
#
itw.On()
isw.On()
#
poly = vtk.vtkPolyData()
points = vtk.vtkPoints()
spoly = vtk.vtkPolyData()
#
# Set up a pipeline to demonstrate extraction of a 2D
# region of interest. Defining a closed clockwise path using the
# tracer widget will extract all pixels within the loop. A counter
# clockwise path provides the dual region of interest.
#
extrude = vtk.vtkLinearExtrusionFilter()
extrude.SetInputData(spoly)
extrude.SetScaleFactor(1)
extrude.SetExtrusionTypeToNormalExtrusion()
extrude.SetVector(1, 0, 0)
#
dataToStencil = vtk.vtkPolyDataToImageStencil()
dataToStencil.SetInputConnection(extrude.GetOutputPort())
#
stencil = vtk.vtkImageStencil()
stencil.SetInputConnection(extract.GetOutputPort())
stencil.SetStencilConnection(dataToStencil.GetOutputPort())
stencil.ReverseStencilOff()
stencil.SetBackgroundValue(128)
#
# Add all the actors.
#
ren.AddViewProp(imageActor)
ren2.AddViewProp(imageActor2)
#
# Render the image.
#
renWin.Render()
#
ren.GetActiveCamera().SetViewUp(0, 1, 0)
ren.GetActiveCamera().Azimuth(270)
ren.GetActiveCamera().Roll(270)
ren.GetActiveCamera().Dolly(1.7)
ren.ResetCameraClippingRange()
#
ren2.GetActiveCamera().SetViewUp(0, 1, 0)
ren2.GetActiveCamera().Azimuth(270)
ren2.GetActiveCamera().Roll(270)
ren2.GetActiveCamera().Dolly(1.7)
ren2.ResetCameraClippingRange()
#
# if we don't do this, the widgets disappear behind the imageActor.
vtk.vtkMapper.SetResolveCoincidentTopologyToPolygonOffset()
vtk.vtkMapper.SetResolveCoincidentTopologyPolygonOffsetParameters(10,10)
renWin.Render()
#
iren.Initialize()
renWin.Render()
iren.Start()
|
gangadhar-kadam/verve-erp
|
refs/heads/v5.0
|
erpnext/stock/doctype/item_customer_detail/item_customer_detail.py
|
41
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemCustomerDetail(Document):
pass
|
bitcity/django
|
refs/heads/master
|
django/core/mail/backends/dummy.py
|
835
|
"""
Dummy email backend that does nothing.
"""
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
return len(list(email_messages))
|
CourseTalk/edx-platform
|
refs/heads/master
|
lms/djangoapps/rss_proxy/models.py
|
65
|
"""
Models for the rss_proxy djangoapp.
"""
from django.db import models
from model_utils.models import TimeStampedModel
class WhitelistedRssUrl(TimeStampedModel):
"""
Model for persisting RSS feed URLs which are whitelisted
for proxying via this rss_proxy djangoapp.
"""
url = models.CharField(max_length=255, unique=True, db_index=True)
class Meta(object):
""" Meta class for this Django model """
app_label = "rss_proxy"
def __unicode__(self):
return unicode(self.url)
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/test_runner/models.py
|
132
|
from django.db import models
class Person(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
|
analurandis/Tur
|
refs/heads/master
|
backend/venv/Lib/site-packages/babel/messages/catalog.py
|
86
|
# -*- coding: utf-8 -*-
"""
babel.messages.catalog
~~~~~~~~~~~~~~~~~~~~~~
Data structures for message catalogs.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
from cgi import parse_header
from datetime import datetime, time as time_
from difflib import get_close_matches
from email import message_from_string
from copy import copy
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
from babel._compat import string_types, number_types, PY2, cmp
__all__ = ['Message', 'Catalog', 'TranslationError']
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
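# For example, this pattern matches the '%(name)s' placeholder in
# 'Hello, %(name)s!' as well as a bare '%d'; Message.python_format below uses
# it to detect such parameters.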
class Message(object):
"""Representation of a single message in a catalog."""
def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Create the message object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
:param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments for the message
:param user_comments: a sequence of user comments for the message
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
self.id = id #: The message ID
if not string and self.pluralizable:
string = (u'', u'')
self.string = string #: The message translation
self.locations = list(distinct(locations))
self.flags = set(flags)
if id and self.python_format:
self.flags.add('python-format')
else:
self.flags.discard('python-format')
self.auto_comments = list(distinct(auto_comments))
self.user_comments = list(distinct(user_comments))
if isinstance(previous_id, string_types):
self.previous_id = [previous_id]
else:
self.previous_id = list(previous_id)
self.lineno = lineno
self.context = context
def __repr__(self):
return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
list(self.flags))
def __cmp__(self, obj):
"""Compare Messages, taking into account plural ids"""
def values_to_compare():
if isinstance(obj, Message):
plural = self.pluralizable
obj_plural = obj.pluralizable
if plural and obj_plural:
return self.id[0], obj.id[0]
elif plural:
return self.id[0], obj.id
elif obj_plural:
return self.id, obj.id[0]
return self.id, obj.id
this, other = values_to_compare()
return cmp(this, other)
def __gt__(self, other):
return self.__cmp__(other) > 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def clone(self):
return Message(*map(copy, (self.id, self.string, self.locations,
self.flags, self.auto_comments,
self.user_comments, self.previous_id,
self.lineno, self.context)))
def check(self, catalog=None):
"""Run various validation checks on the message. Some validations
are only performed if the catalog is provided. This method returns
a sequence of `TranslationError` objects.
:rtype: ``iterator``
:param catalog: A catalog instance that is passed to the checkers
:see: `Catalog.check` for a way to perform checks for all messages
in a catalog.
"""
from babel.messages.checkers import checkers
errors = []
for checker in checkers:
try:
checker(catalog, self)
except TranslationError as e:
errors.append(e)
return errors
@property
def fuzzy(self):
"""Whether the translation is fuzzy.
>>> Message('foo').fuzzy
False
>>> msg = Message('foo', 'foo', flags=['fuzzy'])
>>> msg.fuzzy
True
>>> msg
<Message 'foo' (flags: ['fuzzy'])>
:type: `bool`"""
return 'fuzzy' in self.flags
@property
def pluralizable(self):
"""Whether the message is plurizable.
>>> Message('foo').pluralizable
False
>>> Message(('foo', 'bar')).pluralizable
True
:type: `bool`"""
return isinstance(self.id, (list, tuple))
@property
def python_format(self):
"""Whether the message contains Python-style parameters.
>>> Message('foo %(name)s bar').python_format
True
>>> Message(('foo %(name)s', 'foo %(name)s')).python_format
True
:type: `bool`"""
ids = self.id
if not isinstance(ids, (list, tuple)):
ids = [ids]
return any(PYTHON_FORMAT.search(id) for id in ids)
class TranslationError(Exception):
"""Exception thrown by translation checkers when invalid message
translations are encountered."""
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""
if PY2:
def _parse_header(header_string):
# message_from_string only works for str, not for unicode
headers = message_from_string(header_string.encode('utf8'))
decoded_headers = {}
for name, value in headers.items():
name = name.decode('utf8')
value = value.decode('utf8')
decoded_headers[name] = value
return decoded_headers
else:
_parse_header = message_from_string
class Catalog(object):
"""Representation of a message catalog."""
def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
project=None, version=None, copyright_holder=None,
msgid_bugs_address=None, creation_date=None,
revision_date=None, last_translator=None, language_team=None,
charset=None, fuzzy=True):
"""Initialize the catalog object.
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param header_comment: the header comment as string, or `None` for the
default header
:param project: the project's name
:param version: the project's version
:param copyright_holder: the copyright holder of the catalog
:param msgid_bugs_address: the email address or URL to submit bug
reports to
:param creation_date: the date the catalog was created
:param revision_date: the date the catalog was revised
:param last_translator: the name and email of the last translator
:param language_team: the name and email of the language team
:param charset: the encoding to use in the output (defaults to utf-8)
:param fuzzy: the fuzzy bit on the catalog header
"""
self.domain = domain #: The message domain
if locale:
locale = Locale.parse(locale)
self.locale = locale #: The locale or `None`
self._header_comment = header_comment
self._messages = odict()
self.project = project or 'PROJECT' #: The project name
self.version = version or 'VERSION' #: The project version
self.copyright_holder = copyright_holder or 'ORGANIZATION'
self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
"""Name and email address of the last translator."""
self.language_team = language_team or 'LANGUAGE <LL@li.org>'
"""Name and email address of the language team."""
self.charset = charset or 'utf-8'
if creation_date is None:
creation_date = datetime.now(LOCALTZ)
elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
creation_date = creation_date.replace(tzinfo=LOCALTZ)
self.creation_date = creation_date #: Creation date of the template
if revision_date is None:
revision_date = 'YEAR-MO-DA HO:MI+ZONE'
elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
revision_date = revision_date.replace(tzinfo=LOCALTZ)
self.revision_date = revision_date #: Last revision date of the catalog
self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
self.obsolete = odict() #: Dictionary of obsolete messages
self._num_plurals = None
self._plural_expr = None
def _get_header_comment(self):
comment = self._header_comment
year = datetime.now(LOCALTZ).strftime('%Y')
if hasattr(self.revision_date, 'strftime'):
year = self.revision_date.strftime('%Y')
comment = comment.replace('PROJECT', self.project) \
.replace('VERSION', self.version) \
.replace('YEAR', year) \
.replace('ORGANIZATION', self.copyright_holder)
if self.locale:
comment = comment.replace('Translations template', '%s translations'
% self.locale.english_name)
return comment
def _set_header_comment(self, string):
self._header_comment = string
header_comment = property(_get_header_comment, _set_header_comment, doc="""\
The header comment for the catalog.
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> print catalog.header_comment #doctest: +ELLIPSIS
# Translations template for Foobar.
# Copyright (C) ... Foo Company
# This file is distributed under the same license as the Foobar project.
# FIRST AUTHOR <EMAIL@ADDRESS>, ....
#
The header can also be set from a string. Any known upper-case variables
will be replaced when the header is retrieved again:
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> catalog.header_comment = '''\\
... # The POT for my really cool PROJECT project.
... # Copyright (C) 1990-2003 ORGANIZATION
... # This file is distributed under the same license as the PROJECT
... # project.
... #'''
>>> print catalog.header_comment
# The POT for my really cool Foobar project.
# Copyright (C) 1990-2003 Foo Company
# This file is distributed under the same license as the Foobar
# project.
#
:type: `unicode`
""")
def _get_mime_headers(self):
headers = []
headers.append(('Project-Id-Version',
'%s %s' % (self.project, self.version)))
headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
headers.append(('POT-Creation-Date',
format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
locale='en')))
if isinstance(self.revision_date, (datetime, time_) + number_types):
headers.append(('PO-Revision-Date',
format_datetime(self.revision_date,
'yyyy-MM-dd HH:mmZ', locale='en')))
else:
headers.append(('PO-Revision-Date', self.revision_date))
headers.append(('Last-Translator', self.last_translator))
if (self.locale is not None) and ('LANGUAGE' in self.language_team):
headers.append(('Language-Team',
self.language_team.replace('LANGUAGE',
str(self.locale))))
else:
headers.append(('Language-Team', self.language_team))
if self.locale is not None:
headers.append(('Plural-Forms', self.plural_forms))
headers.append(('MIME-Version', '1.0'))
headers.append(('Content-Type',
'text/plain; charset=%s' % self.charset))
headers.append(('Content-Transfer-Encoding', '8bit'))
headers.append(('Generated-By', 'Babel %s\n' % VERSION))
return headers
def _set_mime_headers(self, headers):
for name, value in headers:
name = name.lower()
if name == 'project-id-version':
parts = value.split(' ')
self.project = u' '.join(parts[:-1])
self.version = parts[-1]
elif name == 'report-msgid-bugs-to':
self.msgid_bugs_address = value
elif name == 'last-translator':
self.last_translator = value
elif name == 'language-team':
self.language_team = value
elif name == 'content-type':
mimetype, params = parse_header(value)
if 'charset' in params:
self.charset = params['charset'].lower()
elif name == 'plural-forms':
_, params = parse_header(' ;' + value)
self._num_plurals = int(params.get('nplurals', 2))
self._plural_expr = params.get('plural', '(n != 1)')
elif name == 'pot-creation-date':
# FIXME: this should use dates.parse_datetime as soon as that
# is ready
value, tzoffset, _ = re.split('([+-]\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
# Separate the offset into a sign component, hours, and minutes
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = datetime.fromtimestamp(ts)
self.creation_date = dt.replace(tzinfo=tzoffset)
elif name == 'po-revision-date':
# Keep the value if it's not the default one
if 'YEAR' not in value:
# FIXME: this should use dates.parse_datetime as soon as
# that is ready
value, tzoffset, _ = re.split('([+-]\d{4})$', value, 1)
tt = time.strptime(value, '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
# Separate the offset into a sign component, hours, and
# minutes
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = datetime.fromtimestamp(ts)
self.revision_date = dt.replace(tzinfo=tzoffset)
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
The behavior of this property changes slightly depending on whether a locale
is set or not, the latter indicating that the catalog is actually a template
for actual translations.
Here's an example of the output for such a catalog template:
>>> from babel.dates import UTC
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
>>> catalog = Catalog(project='Foobar', version='1.0',
... creation_date=created)
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
Last-Translator: FULL NAME <EMAIL@ADDRESS>
Language-Team: LANGUAGE <LL@li.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
And here's an example of the output when the locale is set:
>>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
>>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
... creation_date=created, revision_date=revised,
... last_translator='John Doe <jd@example.com>',
... language_team='de_DE <de@example.com>')
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: 1990-08-03 12:00+0000
Last-Translator: John Doe <jd@example.com>
Language-Team: de_DE <de@example.com>
Plural-Forms: nplurals=2; plural=(n != 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
:type: `list`
""")
@property
def num_plurals(self):
"""The number of plurals used by the catalog or locale.
>>> Catalog(locale='en').num_plurals
2
>>> Catalog(locale='ga').num_plurals
3
:type: `int`"""
if self._num_plurals is None:
num = 2
if self.locale:
num = get_plural(self.locale)[0]
self._num_plurals = num
return self._num_plurals
@property
def plural_expr(self):
"""The plural expression used by the catalog or locale.
>>> Catalog(locale='en').plural_expr
'(n != 1)'
>>> Catalog(locale='ga').plural_expr
'(n==1 ? 0 : n==2 ? 1 : 2)'
:type: `string_types`"""
if self._plural_expr is None:
expr = '(n != 1)'
if self.locale:
expr = get_plural(self.locale)[1]
self._plural_expr = expr
return self._plural_expr
@property
def plural_forms(self):
"""Return the plural forms declaration for the locale.
>>> Catalog(locale='en').plural_forms
'nplurals=2; plural=(n != 1)'
>>> Catalog(locale='pt_BR').plural_forms
'nplurals=2; plural=(n > 1)'
:type: `str`"""
return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
def __contains__(self, id):
"""Return whether the catalog has a message with the specified ID."""
return self._key_for(id) in self._messages
def __len__(self):
"""The number of messages in the catalog.
This does not include the special ``msgid ""`` entry."""
return len(self._messages)
def __iter__(self):
"""Iterates through all the entries in the catalog, in the order they
were added, yielding a `Message` object for every entry.
:rtype: ``iterator``"""
buf = []
for name, value in self.mime_headers:
buf.append('%s: %s' % (name, value))
flags = set()
if self.fuzzy:
flags |= set(['fuzzy'])
yield Message(u'', '\n'.join(buf), flags=flags)
for key in self._messages:
yield self._messages[key]
def __repr__(self):
locale = ''
if self.locale:
locale = ' %s' % self.locale
return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
def __delitem__(self, id):
"""Delete the message with the specified ID."""
self.delete(id)
def __getitem__(self, id):
"""Return the message with the specified ID.
:param id: the message ID
"""
return self.get(id)
def __setitem__(self, id, message):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
If a message with that ID is already in the catalog, it is updated
to include the locations and flags of the new message.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
>>> catalog[u'foo'].locations
[('main.py', 1)]
>>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
>>> catalog[u'foo'].locations
[('main.py', 1), ('utils.py', 5)]
:param id: the message ID
:param message: the `Message` object
"""
assert isinstance(message, Message), 'expected a Message object'
key = self._key_for(id, message.context)
current = self._messages.get(key)
if current:
if message.pluralizable and not current.pluralizable:
# The new message adds pluralization
current.id = message.id
current.string = message.string
current.locations = list(distinct(current.locations +
message.locations))
current.auto_comments = list(distinct(current.auto_comments +
message.auto_comments))
current.user_comments = list(distinct(current.user_comments +
message.user_comments))
current.flags |= message.flags
message = current
elif id == '':
# special treatment for the header message
self.mime_headers = _parse_header(message.string).items()
self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
in message.user_comments])
self.fuzzy = message.fuzzy
else:
if isinstance(id, (list, tuple)):
assert isinstance(message.string, (list, tuple)), \
'Expected sequence but got %s' % type(message.string)
self._messages[key] = message
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog.add(u'foo')
<Message ...>
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
This method simply constructs a `Message` object with the given
arguments and invokes `__setitem__` with that object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
:param locations: a sequence of ``(filename, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments
:param user_comments: a sequence of user comments
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
message = Message(id, string, list(locations), flags, auto_comments,
user_comments, previous_id, lineno=lineno,
context=context)
self[id] = message
return message
def check(self):
"""Run various validation checks on the translations in the catalog.
For every message which fails validation, this method yields a
``(message, errors)`` tuple, where ``message`` is the `Message` object
and ``errors`` is a sequence of `TranslationError` objects.
:rtype: ``iterator``
"""
for message in self._messages.values():
errors = message.check(catalog=self)
if errors:
yield message, errors
def get(self, id, context=None):
"""Return the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
return self._messages.get(self._key_for(id, context))
def delete(self, id, context=None):
"""Delete the message with the specified ID and context.
:param id: the message ID
:param context: the message context, or ``None`` for no context
"""
key = self._key_for(id, context)
if key in self._messages:
del self._messages[key]
def update(self, template, no_fuzzy_matching=False):
"""Update the catalog based on the given template catalog.
>>> from babel.messages import Catalog
>>> template = Catalog()
>>> template.add('green', locations=[('main.py', 99)])
<Message ...>
>>> template.add('blue', locations=[('main.py', 100)])
<Message ...>
>>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
<Message ...>
>>> catalog = Catalog(locale='de_DE')
>>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
<Message ...>
>>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
<Message ...>
>>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
... locations=[('util.py', 38)])
<Message ...>
>>> catalog.update(template)
>>> len(catalog)
3
>>> msg1 = catalog['green']
>>> msg1.string
>>> msg1.locations
[('main.py', 99)]
>>> msg2 = catalog['blue']
>>> msg2.string
u'blau'
>>> msg2.locations
[('main.py', 100)]
>>> msg3 = catalog['salad']
>>> msg3.string
(u'Salat', u'Salate')
>>> msg3.locations
[('util.py', 42)]
Messages that are in the catalog but not in the template are removed
from the main collection, but can still be accessed via the `obsolete`
member:
>>> 'head' in catalog
False
>>> catalog.obsolete.values()
[<Message 'head' (flags: [])>]
:param template: the reference catalog, usually read from a POT file
:param no_fuzzy_matching: whether to use fuzzy matching of message IDs
"""
messages = self._messages
remaining = messages.copy()
self._messages = odict()
# Prepare for fuzzy matching
fuzzy_candidates = []
if not no_fuzzy_matching:
fuzzy_candidates = dict([
(self._key_for(msgid), messages[msgid].context)
for msgid in messages if msgid and messages[msgid].string
])
fuzzy_matches = set()
def _merge(message, oldkey, newkey):
message = message.clone()
fuzzy = False
if oldkey != newkey:
fuzzy = True
fuzzy_matches.add(oldkey)
oldmsg = messages.get(oldkey)
if isinstance(oldmsg.id, string_types):
message.previous_id = [oldmsg.id]
else:
message.previous_id = list(oldmsg.id)
else:
oldmsg = remaining.pop(oldkey, None)
message.string = oldmsg.string
if isinstance(message.id, (list, tuple)):
if not isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = tuple(
[message.string] + ([u''] * (len(message.id) - 1))
)
elif len(message.string) != self.num_plurals:
fuzzy = True
message.string = tuple(message.string[:len(oldmsg.string)])
elif isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = message.string[0]
message.flags |= oldmsg.flags
if fuzzy:
message.flags |= set([u'fuzzy'])
self[message.id] = message
for message in template:
if message.id:
key = self._key_for(message.id, message.context)
if key in messages:
_merge(message, key, key)
else:
if no_fuzzy_matching is False:
# do some fuzzy matching with difflib
if isinstance(key, tuple):
matchkey = key[0] # just the msgid, no context
else:
matchkey = key
matches = get_close_matches(matchkey.lower().strip(),
fuzzy_candidates.keys(), 1)
if matches:
newkey = matches[0]
newctxt = fuzzy_candidates[newkey]
if newctxt is not None:
newkey = newkey, newctxt
_merge(message, newkey, key)
continue
self[message.id] = message
for msgid in remaining:
if no_fuzzy_matching or msgid not in fuzzy_matches:
self.obsolete[msgid] = remaining[msgid]
# Make updated catalog's POT-Creation-Date equal to the template
# used to update the catalog
self.creation_date = template.creation_date
def _key_for(self, id, context=None):
"""The key for a message is just the singular ID even for pluralizable
messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
messages.
"""
key = id
if isinstance(key, (list, tuple)):
key = id[0]
if context is not None:
key = (key, context)
return key
|
NINAnor/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/SimplifyGeometries.py
|
10
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SimplifyGeometries.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QGis, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class SimplifyGeometries(GeoAlgorithm):
INPUT = 'INPUT'
TOLERANCE = 'TOLERANCE'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Simplify geometries')
self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'),
[ParameterVector.VECTOR_TYPE_POLYGON, ParameterVector.VECTOR_TYPE_LINE]))
self.addParameter(ParameterNumber(self.TOLERANCE,
self.tr('Tolerance'), 0.0, 10000000.0, 1.0))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Simplified')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
tolerance = self.getParameterValue(self.TOLERANCE)
pointsBefore = 0
pointsAfter = 0
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
layer.pendingFields().toList(), layer.wkbType(), layer.crs())
current = 0
selection = vector.features(layer)
total = 100.0 / float(len(selection))
for f in selection:
featGeometry = QgsGeometry(f.geometry())
attrs = f.attributes()
pointsBefore += self.geomVertexCount(featGeometry)
newGeometry = featGeometry.simplify(tolerance)
pointsAfter += self.geomVertexCount(newGeometry)
feature = QgsFeature()
feature.setGeometry(newGeometry)
feature.setAttributes(attrs)
writer.addFeature(feature)
current += 1
progress.setPercentage(int(current * total))
del writer
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Simplify: Input geometries have been simplified from %s to %s points' % (pointsBefore, pointsAfter)))
def geomVertexCount(self, geometry):
geomType = geometry.type()
if geomType == QGis.Line:
if geometry.isMultipart():
pointsList = geometry.asMultiPolyline()
points = sum(pointsList, [])
else:
points = geometry.asPolyline()
return len(points)
elif geomType == QGis.Polygon:
if geometry.isMultipart():
polylinesList = geometry.asMultiPolygon()
polylines = sum(polylinesList, [])
else:
polylines = geometry.asPolygon()
points = []
for l in polylines:
points.extend(l)
return len(points)
else:
return None
|
playpauseandstop/aiohttp
|
refs/heads/master
|
examples/web_ws.py
|
4
|
#!/usr/bin/env python3
"""Example for aiohttp.web websocket server
"""
import asyncio
import os
from aiohttp.web import (Application, Response, WebSocketResponse, WSMsgType,
run_app)
WS_FILE = os.path.join(os.path.dirname(__file__), 'websocket.html')
async def wshandler(request):
resp = WebSocketResponse()
ok, protocol = resp.can_prepare(request)
if not ok:
with open(WS_FILE, 'rb') as fp:
return Response(body=fp.read(), content_type='text/html')
await resp.prepare(request)
try:
print('Someone joined.')
for ws in request.app['sockets']:
await ws.send_str('Someone joined')
request.app['sockets'].append(resp)
async for msg in resp:
if msg.type == WSMsgType.TEXT:
for ws in request.app['sockets']:
if ws is not resp:
await ws.send_str(msg.data)
else:
return resp
return resp
finally:
request.app['sockets'].remove(resp)
print('Someone disconnected.')
for ws in request.app['sockets']:
await ws.send_str('Someone disconnected.')
async def on_shutdown(app):
for ws in app['sockets']:
await ws.close()
async def init(loop):
app = Application()
app['sockets'] = []
app.router.add_get('/', wshandler)
app.on_shutdown.append(on_shutdown)
return app
loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(loop))
run_app(app)
|
koobonil/Boss2D
|
refs/heads/master
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/layers/utils_test.py
|
63
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.layers import utils
from tensorflow.python.platform import test
class ConvUtilsTest(test.TestCase):
def testConvertDataFormat(self):
self.assertEqual('NCDHW', utils.convert_data_format('channels_first', 5))
self.assertEqual('NCHW', utils.convert_data_format('channels_first', 4))
self.assertEqual('NCW', utils.convert_data_format('channels_first', 3))
self.assertEqual('NHWC', utils.convert_data_format('channels_last', 4))
self.assertEqual('NWC', utils.convert_data_format('channels_last', 3))
self.assertEqual('NDHWC', utils.convert_data_format('channels_last', 5))
with self.assertRaises(ValueError):
utils.convert_data_format('invalid', 2)
def testNormalizeTuple(self):
self.assertEqual((2, 2, 2), utils.normalize_tuple(2, n=3, name='strides'))
self.assertEqual(
(2, 1, 2), utils.normalize_tuple((2, 1, 2), n=3, name='strides'))
with self.assertRaises(ValueError):
utils.normalize_tuple((2, 1), n=3, name='strides')
with self.assertRaises(ValueError):
utils.normalize_tuple(None, n=3, name='strides')
def testNormalizeDataFormat(self):
self.assertEqual(
'channels_last', utils.normalize_data_format('Channels_Last'))
self.assertEqual(
'channels_first', utils.normalize_data_format('CHANNELS_FIRST'))
with self.assertRaises(ValueError):
utils.normalize_data_format('invalid')
def testNormalizePadding(self):
self.assertEqual('same', utils.normalize_padding('SAME'))
self.assertEqual('valid', utils.normalize_padding('VALID'))
with self.assertRaises(ValueError):
utils.normalize_padding('invalid')
def testConvOutputLength(self):
self.assertEqual(4, utils.conv_output_length(4, 2, 'same', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'same', 2, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'valid', 1, 1))
self.assertEqual(2, utils.conv_output_length(4, 2, 'valid', 2, 1))
self.assertEqual(5, utils.conv_output_length(4, 2, 'full', 1, 1))
self.assertEqual(3, utils.conv_output_length(4, 2, 'full', 2, 1))
self.assertEqual(2, utils.conv_output_length(5, 2, 'valid', 2, 2))
def testConvInputLength(self):
self.assertEqual(3, utils.conv_input_length(4, 2, 'same', 1))
self.assertEqual(2, utils.conv_input_length(2, 2, 'same', 2))
self.assertEqual(4, utils.conv_input_length(3, 2, 'valid', 1))
self.assertEqual(4, utils.conv_input_length(2, 2, 'valid', 2))
self.assertEqual(3, utils.conv_input_length(4, 2, 'full', 1))
self.assertEqual(4, utils.conv_input_length(3, 2, 'full', 2))
def testDeconvOutputLength(self):
self.assertEqual(4, utils.deconv_output_length(4, 2, 'same', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'same', 2))
self.assertEqual(5, utils.deconv_output_length(4, 2, 'valid', 1))
self.assertEqual(8, utils.deconv_output_length(4, 2, 'valid', 2))
self.assertEqual(3, utils.deconv_output_length(4, 2, 'full', 1))
self.assertEqual(6, utils.deconv_output_length(4, 2, 'full', 2))
if __name__ == '__main__':
test.main()
|
keflavich/pyspeckit-obsolete
|
refs/heads/master
|
pyspeckit/spectrum/readers/readcol.py
|
3
|
"""
readcol.py by Adam Ginsburg (keflavich@gmail.com)
readcol is meant to emulate IDL's readcol.pro, but is more capable and
flexible. It is not a particularly "pythonic" program since it is not modular.
For a modular ascii table reader, http://cxc.harvard.edu/contrib/asciitable/ is
probably better. This single-function code is probably more intuitive to an
end-user, though.
"""
import string,re,sys
import numpy
try:
from scipy.stats import mode
hasmode = True
except ImportError:
#print "scipy could not be imported. Your table must have full rows."
hasmode = False
except ValueError:
#print "error"
hasmode = False
def readcol(filename,skipline=0,skipafter=0,names=False,fsep=None,twod=True,
fixedformat=None,asdict=False,comment='#',verbose=True,nullval=None,
asStruct=False,namecomment=True,removeblanks=False,header_badchars=None):
"""
The default return is a two dimensional float array. If you want a list of
columns output instead of a 2D array, pass 'twod=False'. In this case,
each column's data type will be automatically detected.
Example usage:
CASE 1) a table has the format:
X Y Z
0.0 2.4 8.2
1.0 3.4 5.6
0.7 3.2 2.1
...
names,(x,y,z)=readcol("myfile.tbl",names=True,twod=False)
or
x,y,z=readcol("myfile.tbl",skipline=1,twod=False)
or
names,xx = readcol("myfile.tbl",names=True)
or
xxdict = readcol("myfile.tbl",asdict=True)
or
xxstruct = readcol("myfile.tbl",asStruct=True)
CASE 2) no title is contained in the table, so there is
no need to use skipline:
x,y,z=readcol("myfile.tbl")
CASE 3) there is a names column and then more descriptive text:
X Y Z
(deg) (deg) (km/s)
0.0 2.4 8.2
1.0 3.4 5.6
...
then use:
names,x,y,z=readcol("myfile.tbl",names=True,skipline=1,twod=False)
or
x,y,z=readcol("myfile.tbl",skipline=2,twod=False)
INPUTS:
fsep - field separator, e.g. for comma separated value (csv) files
skipline - number of lines to ignore at the start of the file
names - read / don't read in the first line as a list of column names
can specify an integer line number too, though it will be
the line number after skipping lines
twod - two dimensional or one dimensional output
nullval - if specified, all instances of this value will be replaced
with a floating NaN
asdict - zips names with data to create a dict with column headings
tied to column data. If asdict=True, names will be set to True
asStruct - same as asdict, but returns a structure instead of a dictionary
(i.e. you call struct.key instead of struct['key'])
fixedformat - if you have a fixed format file, this is a python list of
column lengths. e.g. the first table above would be [3,5,5]. Note
that if you specify the wrong fixed format, you will get junk; if your
format total is greater than the line length, the last entries will all
be blank but readcol will not report an error.
namecomment - assumes that the "Name" row is on a comment line. If it is not -
e.g., it is the first non-comment line - change this to False
removeblanks - remove all blank entries from split lines. This can cause data
loss if you have blank entries on some lines.
header_badchars - remove these characters from a header before parsing it
(helpful for IPAC tables that are delimited with | )
If you get this error: "scipy could not be imported. Your table must have
full rows." it means readcol cannot automatically guess which columns
contain data. If you have scipy and columns of varying length, readcol will
read in all of the rows with length=mode(row lengths).
"""
f=open(filename,'r').readlines()
null=[f.pop(0) for i in range(skipline)]
commentfilter = make_commentfilter(comment)
if namecomment is False and (names or asdict or asStruct):
while 1:
line = f.pop(0)
if line[0] != comment:
nameline = line
if header_badchars:
for c in header_badchars:
nameline = nameline.replace(c,' ')
nms=nameline.split(fsep)
break
elif len(f) == 0:
raise Exception("No uncommented lines found.")
else:
if names or asdict or asStruct:
# can specify name line
if type(names) == type(1):
nameline = f.pop(names)
else:
nameline = f.pop(0)
if nameline[0]==comment:
nameline = nameline[1:]
if header_badchars:
for c in header_badchars:
nameline = nameline.replace(c,' ')
nms=nameline.split(fsep)
null=[f.pop(0) for i in range(skipafter)]
if fixedformat:
myreadff = lambda(x): readff(x,fixedformat)
splitarr = map(myreadff,f)
splitarr = filter(commentfilter,splitarr)
else:
fstrip = map(string.strip,f)
fseps = [ fsep for i in range(len(f)) ]
splitarr = map(string.split,fstrip,fseps)
if removeblanks:
for i in xrange(splitarr.count([''])):
splitarr.remove([''])
splitarr = filter(commentfilter,splitarr)
# check to make sure each line has the same number of columns to avoid
# "ValueError: setting an array element with a sequence."
nperline = map(len,splitarr)
if hasmode:
ncols,nrows = mode(nperline)
if nrows != len(splitarr):
if verbose:
print "Removing %i rows that don't match most common length %i. \
\n%i rows read into array." % (len(splitarr) - nrows,ncols,nrows)
for i in xrange(len(splitarr)-1,-1,-1): # need to go backwards
if nperline[i] != ncols:
splitarr.pop(i)
try:
x = numpy.asarray( splitarr , dtype='float')
except ValueError:
if verbose:
print "WARNING: reading as string array because %s array failed" % 'float'
try:
x = numpy.asarray( splitarr , dtype='S')
except ValueError:
if hasmode:
raise Exception( "ValueError when converting data to array." + \
" You have scipy.mode on your system, so this is " + \
"probably not an issue of differing row lengths." )
else:
raise Exception( "Conversion to array error. You probably " + \
"have different row lengths and scipy.mode was not " + \
"imported." )
if nullval is not None:
x[x==nullval] = numpy.nan
x = get_autotype(x)
if asdict or asStruct:
mydict = dict(zip(nms,x.T))
for k,v in mydict.iteritems():
mydict[k] = get_autotype(v)
if asdict:
return mydict
elif asStruct:
return Struct(mydict)
elif names and twod:
return nms,x
elif names:
# if not returning a twod array, try to return each vector as the spec. type
return nms,[ get_autotype(x.T[i]) for i in xrange(x.shape[1]) ]
else:
if twod:
return x
else:
return [ get_autotype(x.T[i]) for i in xrange(x.shape[1]) ]
def get_autotype(arr):
"""
Attempts to return a numpy array converted to the most sensible dtype
Value errors will be caught and simply return the original array
Tries to make dtype int, then float, then no change
"""
try:
narr = arr.astype('float')
if (narr < sys.maxint).all() and (narr % 1).sum() == 0:
return narr.astype('int')
else:
return narr
except ValueError:
return arr
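# For example, string data like ['1', '2'] comes back as an int array,
# ['1.5', '2.5'] as a float array, and non-numeric strings are returned unchanged.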
class Struct(object):
"""
Simple struct intended to take a dictionary of column names -> columns
and turn it into a struct by removing special characters
"""
def __init__(self,namedict):
R = re.compile('\W') # find and remove all non-alphanumeric characters
for k in namedict.keys():
v = namedict.pop(k)
if k[0].isdigit():
k = 'n'+k
namedict[R.sub('',k)] = v
self.__dict__ = namedict
def add_column(self,name,data):
"""
Add a new column (attribute) to the struct
(will overwrite anything with the same name)
"""
self.__dict__[name] = data
def readff(s,format):
"""
Fixed-format reader
Pass in a single line string (s) and a format list,
which needs to be a python list of string lengths
"""
F = numpy.array([0]+format).cumsum()
bothF = zip(F[:-1],F[1:])
strarr = [s[l:u] for l,u in bothF]
return strarr
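# For example, readff('abcdefgh', [3, 5]) returns ['abc', 'defgh'].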
def make_commentfilter(comment):
if comment is not None:
def commentfilter(a):
try: return comment.find(a[0][0])
except: return -1
return commentfilter
else: # always return false
return lambda(x): -1
|
nicolasnoble/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/_interceptor.py
|
15
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gRPC Python interceptors."""
import collections
import sys
import grpc
class _ServicePipeline(object):
def __init__(self, interceptors):
self.interceptors = tuple(interceptors)
def _continuation(self, thunk, index):
return lambda context: self._intercept_at(thunk, index, context)
def _intercept_at(self, thunk, index, context):
if index < len(self.interceptors):
interceptor = self.interceptors[index]
thunk = self._continuation(thunk, index + 1)
return interceptor.intercept_service(thunk, context)
else:
return thunk(context)
def execute(self, thunk, context):
return self._intercept_at(thunk, 0, context)
def service_pipeline(interceptors):
return _ServicePipeline(interceptors) if interceptors else None
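# For example, service_pipeline([a, b]).execute(handler, context) first calls
# a.intercept_service(continuation, context); that continuation runs
# b.intercept_service(...), whose own continuation finally invokes handler(context).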
class _ClientCallDetails(
collections.namedtuple('_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials',
'wait_for_ready', 'compression')),
grpc.ClientCallDetails):
pass
def _unwrap_client_call_details(call_details, default_details):
try:
method = call_details.method
except AttributeError:
method = default_details.method
try:
timeout = call_details.timeout
except AttributeError:
timeout = default_details.timeout
try:
metadata = call_details.metadata
except AttributeError:
metadata = default_details.metadata
try:
credentials = call_details.credentials
except AttributeError:
credentials = default_details.credentials
try:
wait_for_ready = call_details.wait_for_ready
except AttributeError:
wait_for_ready = default_details.wait_for_ready
try:
compression = call_details.compression
except AttributeError:
compression = default_details.compression
return method, timeout, metadata, credentials, wait_for_ready, compression
class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
def __init__(self, exception, traceback):
super(_FailureOutcome, self).__init__()
self._exception = exception
self._traceback = traceback
def initial_metadata(self):
return None
def trailing_metadata(self):
return None
def code(self):
return grpc.StatusCode.INTERNAL
def details(self):
return 'Exception raised while intercepting the RPC'
def cancel(self):
return False
def cancelled(self):
return False
def is_active(self):
return False
def time_remaining(self):
return None
def running(self):
return False
def done(self):
return True
def result(self, ignored_timeout=None):
raise self._exception
def exception(self, ignored_timeout=None):
return self._exception
def traceback(self, ignored_timeout=None):
return self._traceback
def add_callback(self, unused_callback):
return False
def add_done_callback(self, fn):
fn(self)
def __iter__(self):
return self
def __next__(self):
raise self._exception
def next(self):
return self.__next__()
class _UnaryOutcome(grpc.Call, grpc.Future):
def __init__(self, response, call):
self._response = response
self._call = call
def initial_metadata(self):
return self._call.initial_metadata()
def trailing_metadata(self):
return self._call.trailing_metadata()
def code(self):
return self._call.code()
def details(self):
return self._call.details()
def is_active(self):
return self._call.is_active()
def time_remaining(self):
return self._call.time_remaining()
def cancel(self):
return self._call.cancel()
def add_callback(self, callback):
return self._call.add_callback(callback)
def cancelled(self):
return False
def running(self):
return False
def done(self):
return True
def result(self, ignored_timeout=None):
return self._response
def exception(self, ignored_timeout=None):
return None
def traceback(self, ignored_timeout=None):
return None
def add_done_callback(self, fn):
fn(self)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
response, ignored_call = self._with_call(request,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
return response
def _with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
try:
response, call = self._thunk(new_method).with_call(
request,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
return _UnaryOutcome(response, call)
except grpc.RpcError as rpc_error:
return rpc_error
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
call = self._interceptor.intercept_unary_unary(continuation,
client_call_details,
request)
return call.result(), call
def with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
return self._with_call(request,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
def future(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method).future(
request,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_unary_unary(
continuation, client_call_details, request)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method)(request,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_unary_stream(
continuation, client_call_details, request)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
response, ignored_call = self._with_call(request_iterator,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
return response
def _with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request_iterator):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
try:
response, call = self._thunk(new_method).with_call(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
return _UnaryOutcome(response, call)
except grpc.RpcError as rpc_error:
return rpc_error
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
call = self._interceptor.intercept_stream_unary(continuation,
client_call_details,
request_iterator)
return call.result(), call
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
return self._with_call(request_iterator,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request_iterator):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method).future(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_stream_unary(
continuation, client_call_details, request_iterator)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request_iterator):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method)(request_iterator,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_stream_stream(
continuation, client_call_details, request_iterator)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _Channel(grpc.Channel):
def __init__(self, channel, interceptor):
self._channel = channel
self._interceptor = interceptor
def subscribe(self, callback, try_to_connect=False):
self._channel.subscribe(callback, try_to_connect=try_to_connect)
def unsubscribe(self, callback):
self._channel.unsubscribe(callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.unary_unary(m, request_serializer,
response_deserializer)
if isinstance(self._interceptor, grpc.UnaryUnaryClientInterceptor):
return _UnaryUnaryMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.unary_stream(m, request_serializer,
response_deserializer)
if isinstance(self._interceptor, grpc.UnaryStreamClientInterceptor):
return _UnaryStreamMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.stream_unary(m, request_serializer,
response_deserializer)
if isinstance(self._interceptor, grpc.StreamUnaryClientInterceptor):
return _StreamUnaryMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.stream_stream(m, request_serializer,
response_deserializer)
if isinstance(self._interceptor, grpc.StreamStreamClientInterceptor):
return _StreamStreamMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def _close(self):
self._channel.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._channel.close()
def intercept_channel(channel, *interceptors):
for interceptor in reversed(list(interceptors)):
if not isinstance(interceptor, grpc.UnaryUnaryClientInterceptor) and \
not isinstance(interceptor, grpc.UnaryStreamClientInterceptor) and \
not isinstance(interceptor, grpc.StreamUnaryClientInterceptor) and \
not isinstance(interceptor, grpc.StreamStreamClientInterceptor):
raise TypeError('interceptor must be '
'grpc.UnaryUnaryClientInterceptor or '
'grpc.UnaryStreamClientInterceptor or '
'grpc.StreamUnaryClientInterceptor or '
                            'grpc.StreamStreamClientInterceptor.')
channel = _Channel(channel, interceptor)
return channel
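# Illustrative usage sketch (not part of the module above; the target address
# is a hypothetical example): wrapping a channel with a unary-unary
# interceptor so every RPC is logged before being delegated unchanged.
#
#     import grpc
#
#     class _LoggingInterceptor(grpc.UnaryUnaryClientInterceptor):
#         def intercept_unary_unary(self, continuation, client_call_details,
#                                    request):
#             # Inspect or rewrite client_call_details here, then delegate.
#             print('invoking %s' % client_call_details.method)
#             return continuation(client_call_details, request)
#
#     channel = intercept_channel(grpc.insecure_channel('localhost:50051'),
#                                 _LoggingInterceptor())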
|
PetukhovVictor/compiler
|
refs/heads/master
|
src/Compiler/ASM/Codegen/expressions/arithmetic.py
|
1
|
from ...Core.registers import Registers
from ...Core.commands import Commands
from ...Core.types import Types
from ...Runtime.gc import GC
""" Map: arithmetic operator in programming language = arithmetic operator in ASM """
binop_compare_map = {
'+': {
'operator': Commands.ADD,
'operands': [Registers.EAX, Registers.EBX]
},
'-': {
'operator': Commands.SUB,
'operands': [Registers.EAX, Registers.EBX]
},
'*': {
'operator': Commands.MUL,
'operands': [Registers.EBX]
},
'/': {
'operator': Commands.IDIV,
'operands': [Registers.EBX]
},
'%': {
'operator': Commands.IDIV,
'operands': [Registers.EBX]
}
}
def int_aexp(compiler, node):
""" Integer compilation """
compiler.code.add(Commands.MOV, [Registers.EAX, node.i])\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
def binop_aexp(compiler, node):
""" Arithmetic expression compilation """
node.left.compile_asm(compiler)
compiler.types.pop()
node.right.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EBX)\
.add(Commands.POP, Registers.EAX)
if node.op == '/' or node.op == '%':
compiler.code.add(Commands.CDQ)
compiler.code.add(binop_compare_map[node.op]['operator'], binop_compare_map[node.op]['operands'])
if node.op == '%':
compiler.code.add(Commands.MOV, [Registers.EAX, Registers.EDX])
compiler.code.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
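# Illustrative note (assuming Commands/Registers map to the usual x86
# mnemonics): for an expression such as `a % b`, binop_aexp emits roughly
#
#     pop  ebx        ; right operand
#     pop  eax        ; left operand
#     cdq             ; sign-extend eax into edx:eax before idiv
#     idiv ebx        ; quotient -> eax, remainder -> edx
#     mov  eax, edx   ; '%' keeps the remainder
#     push eax        ; result back on the stack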
def var_aexp(compiler, node):
""" Variable compilation """
if node.context == 'assign':
gc = GC(compiler)
if compiler.environment.is_exist_local_var(node.name):
var = compiler.environment.get_local_var(node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s]' % Registers.ESP])
compiler.code.add(Commands.MOV, [var_type, Registers.EAX])
compiler.environment.update_local_var_type(node.name, node.type)
compiler.code.add(Commands.MOV, [Registers.EAX, var])
compiler.code.add(Commands.MOV, [Registers.EBX, var_type])
gc.decrement()
else:
var = compiler.environment.add_local_var(node.type, node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
if compiler.environment.defined_object is not None:
compiler.environment.set_link_object(var, compiler.environment.defined_object)
compiler.environment.defined_object = None
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s + 4]' % Registers.ESP])
compiler.code.add(Commands.MOV, [Registers.EBX, 'dword [%s]' % Registers.ESP])
gc.increment()
compiler.code.add(Commands.POP, var_type)
compiler.code.add(Commands.POP, var)
else:
compiler.code.add(Commands.MOV, [Registers.EAX, compiler.environment.get_local_var(node.name)])\
.add(Commands.PUSH, Registers.EAX)
runtime_var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.types.set(runtime_var_type)
var_type = compiler.environment.get_local_var_type(node.name)
return var_type
|
kisiel/django-oscar-przelewy24
|
refs/heads/master
|
przelewy24/views.py
|
1
|
import json
import requests
import logging
import hashlib
from decimal import Decimal as D
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from django.views.generic import View, TemplateView
from oscar.core.loading import get_class, get_model
from przelewy24 import const, PRZELEWY24_PL, LOGGING_PREFIX
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
CheckoutSessionData = get_class('checkout.session', 'CheckoutSessionData')
PaymentDetailsView = get_class('checkout.views', 'PaymentDetailsView')
OrderPlacementMixin = get_class('checkout.mixins', 'OrderPlacementMixin')
OrderTotalCalculator = get_class(
'checkout.calculators', 'OrderTotalCalculator')
Selector = get_class('partner.strategy', 'Selector')
Przelewy24PrepareForm = get_class('przelewy24.forms', 'Przelewy24PrepareForm')
Basket = get_model('basket', 'Basket')
Source = get_model('payment', 'Source')
SourceType = get_model('payment', 'SourceType')
Przelewy24Transaction = get_model('przelewy24', 'Przelewy24Transaction')
logger = logging.getLogger(PRZELEWY24_PL)
class Przelewy24PrepareView(OrderPlacementMixin, TemplateView):
template_name = 'przelewy24/prepare_form.html'
def _get_basket(self):
self.restore_frozen_basket()
self.order_number = self.checkout_session.get_order_number()
basket = self.request.basket
self.basket_id = basket.id
shipping_address = self.get_shipping_address(basket)
shipping_method = self.get_shipping_method(
basket, shipping_address)
if shipping_method:
shipping_charge = shipping_method.calculate(basket)
else:
# It's unusual to get here as a shipping method should be set by
# the time this skip-condition is called. In the absence of any
# other evidence, we assume the shipping charge is zero.
shipping_charge = D('0.00')
self.order_total = self.get_order_totals(
basket, shipping_charge=shipping_charge).incl_tax
self.freeze_basket(basket)
self.checkout_session.set_submitted_basket(basket)
logger.info('%s - freezing basket with id: %s.' % (
LOGGING_PREFIX, self.basket_id))
def _get_scheme(self):
return 'https' if self.request.is_secure() else 'http'
def _get_host(self):
return self.request.META['HTTP_HOST']
def _get_absolute_url(self, url):
return '%s://%s%s' % (self._get_scheme(), self._get_host(), url)
def _get_p24_return_url_ok(self):
url = reverse('przelewy24-accept', args=(self.basket_id,))
return self._get_absolute_url(url)
def _get_p24_return_url_error(self):
url = reverse('przelewy24-reject', args=(self.basket_id,))
return self._get_absolute_url(url)
def _get_p24_kwota(self):
return str(int(100 * self.order_total))
def _get_p24_session_id(self):
if not hasattr(self, '_p24_session_id'):
hash_code = hashlib.sha224()
hash_code.update(str(settings.SECRET_KEY))
hash_code.update(str(const.P24_VENDOR_ID))
hash_code.update(str(now()))
self._p24_session_id = hash_code.hexdigest()
return self._p24_session_id
def _get_p24_crc(self):
if not hasattr(self, '_p24_crc'):
crc_hash = "%s|%s|%s|%s" % (
self._get_p24_session_id(), const.P24_VENDOR_ID,
self._get_p24_kwota(), const.P24_VENDOR_CRC)
m = hashlib.md5()
m.update(crc_hash)
self._p24_crc = m.hexdigest()
return self._p24_crc
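    # Illustrative note with hypothetical values: for session id 'abc',
    # vendor id 9999, amount '1000' (10.00 PLN in grosze) and CRC key
    # 'secret', the checksum is md5('abc|9999|1000|secret').hexdigest(),
    # the same string Przelewy24 is expected to recompute on its side.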
def _get_p24_email(self):
return const.P24_VENDOR_EMAIL
def _get_p24_opis(self):
return const.P24_OPIS or self.request.user.email
def _save_p24_transaction(self):
p24 = Przelewy24Transaction.objects.create(**self._get_form_initial())
logger.info('%s - saved Przelewy24Transaction with ID: %s' % (
LOGGING_PREFIX, p24.pk))
return p24
def _get_form_initial(self):
p24_session_id = self._get_p24_session_id()
p24_crc = self._get_p24_crc()
p24_kwota = self._get_p24_kwota()
p24_return_url_ok = self._get_p24_return_url_ok()
p24_return_url_error = self._get_p24_return_url_error()
p24_opis = self._get_p24_opis()
initial = {
'p24_session_id': p24_session_id,
'p24_id_sprzedawcy': const.P24_VENDOR_ID,
'p24_email': self._get_p24_email(),
'p24_kwota': p24_kwota,
'p24_crc': p24_crc,
'p24_return_url_ok': p24_return_url_ok,
'p24_return_url_error': p24_return_url_error,
'p24_opis': p24_opis}
logger.info('%s - initial POST: %s' % (LOGGING_PREFIX,
json.dumps(initial)))
return initial
def get_context_data(self, **kwargs):
context = super(Przelewy24PrepareView, self).get_context_data(**kwargs)
prepare_form = Przelewy24PrepareForm(initial=self._get_form_initial())
context.update({
'prepare_form': prepare_form,
'p24_url': const.P24_INIT_URL})
return context
def get(self, request, *args, **kwargs):
self._get_basket()
self._save_p24_transaction()
return super(Przelewy24PrepareView, self).get(request, *args, **kwargs)
class Przelewy24MixIn(object):
required_POST_params = []
required_model_attrs = []
new_attrs_to_set = []
def _get_p24_transaction(self):
if not hasattr(self, '_p24_transaction'):
p24_session_id = self.request.POST['p24_session_id']
try:
p24 = Przelewy24Transaction.objects.get(
p24_session_id=p24_session_id,
status=const.P24_STATUS_INITIATED)
except Przelewy24Transaction.DoesNotExist:
logger.error('P24 - Przelewy24Transaction with ID %s does '
'not exist' % p24_session_id)
raise Http404
self._p24_transaction = p24
return self._p24_transaction
p24_transaction = property(_get_p24_transaction)
def _check_required_POST_parameters(self):
post = self.request.POST
if not all([i in post for i in self.required_POST_params]):
logger.error('%s - required POST parameter missing' %
LOGGING_PREFIX)
return False
return True
def _get_p24_crc2(self):
post = self.request.POST
crc_hash = "%s|%s|%s|%s" % (
self.p24_transaction.p24_session_id,
post['p24_order_id'],
self.p24_transaction.p24_kwota,
const.P24_VENDOR_CRC)
m = hashlib.md5()
m.update(crc_hash)
return m.hexdigest()
def _verify_p24_crc2(self):
post = self.request.POST
p24_crc2_from_post = post['p24_crc']
if not self.p24_transaction.p24_crc2 == p24_crc2_from_post:
logger.error('%s - p24_crc2 does not match. %s!=%s' % (
LOGGING_PREFIX, self.p24_transaction.p24_crc2,
p24_crc2_from_post))
return False
return True
def _verify_required_model_attrs(self):
p24 = self.p24_transaction
post = self.request.POST
for attr in self.required_model_attrs:
if not getattr(p24, attr) == post[attr]:
logger.error('%s - %s does not match. %s!=%s' % (
LOGGING_PREFIX, attr, getattr(p24, attr), post[attr]))
return False
return True
def _verify_p24_response(self):
if not self._check_required_POST_parameters() or \
not self._verify_required_model_attrs():
return False
p24_crc2 = self._get_p24_crc2()
p24 = self.p24_transaction
p24.p24_crc2 = p24_crc2
if not self._verify_p24_crc2():
return False
return True
def _save_p24_transaction(self, attrs_only=None, additional_attrs=None,
commit=True):
p24 = self.p24_transaction
post = self.request.POST
if attrs_only:
for k, v in attrs_only.items():
setattr(p24, k, v)
else:
for attr in self.new_attrs_to_set:
setattr(p24, attr, post[attr])
if additional_attrs:
for k, v in additional_attrs.items():
setattr(p24, k, v)
if commit:
p24.save()
return p24
class Przelewy24AcceptPaymentView(Przelewy24MixIn, PaymentDetailsView):
required_POST_params = ['p24_order_id', 'p24_kwota', 'p24_crc',
'p24_karta', 'p24_id_sprzedawcy',
'p24_order_id_full', 'p24_session_id']
required_model_attrs = ['p24_kwota', 'p24_id_sprzedawcy']
new_attrs_to_set = ['p24_order_id', 'p24_order_id_full', 'p24_karta']
def _set_basket(self):
self.checkout_session = CheckoutSessionData(self.request)
self.restore_frozen_basket()
self.basket_id = self.checkout_session.get_submitted_basket_id()
logger.info('%s - restoring frozen basket with id: %s.' % (
LOGGING_PREFIX, self.basket_id))
def _verify_basket_id(self):
basket_id_from_request = int(self.kwargs.get('basket_id'))
if not self.basket_id == basket_id_from_request:
logger.error('%s - basket id does not match. %s!=%s' % (
LOGGING_PREFIX, self.basket_id, basket_id_from_request))
return False
return True
def _confirm_p24_transaction(self):
p24 = self.p24_transaction
url = const.P24_TRANSACTION_URL
data = {
'p24_session_id': p24.p24_session_id,
'p24_order_id': p24.p24_order_id,
'p24_id_sprzedawcy': p24.p24_id_sprzedawcy,
'p24_kwota': p24.p24_kwota,
'p24_crc': p24.p24_crc2
}
logger.info('%s - sending confirmation request: %s' % (
LOGGING_PREFIX, json.dumps(data)))
response = requests.post(url, data=data)
logger.info('%s - confirmation response: %s' % (
LOGGING_PREFIX, response.content))
confirmation_response = list(response.iter_lines())
# confirmed
if response.status_code == 200 and confirmation_response[1] == 'TRUE':
return True, confirmation_response
# NOT confirmed
return False, confirmation_response
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
self._set_basket()
return super(Przelewy24AcceptPaymentView, self).dispatch(
request, *args, **kwargs)
def post(self, request, *args, **kwargs):
post = request.POST
logger.info('%s - accept view. Basket ID: %s POST: %s' % (
LOGGING_PREFIX, self.basket_id, json.dumps(post)))
if not self._verify_basket_id() or not self._verify_p24_response():
self._save_p24_transaction(
attrs_only={'status': const.P24_STATUS_FAKE})
messages.error(
self.request,
_("%(payment_source)s - transaction incorrect" % {
'payment_source': PRZELEWY24_PL}))
logger.error('%s - transaction incorrect' % (LOGGING_PREFIX,))
return HttpResponseRedirect(reverse('basket:summary'))
self._save_p24_transaction(commit=False)
confirmed, confirmation_response = self._confirm_p24_transaction()
if not confirmed:
self._save_p24_transaction(attrs_only={
'status': const.P24_STATUS_ACCEPTED_NOT_VERIFIED,
'p24_error_code': confirmation_response[2],
'p24_error_desc': confirmation_response[3].decode('cp1252'),
})
messages.error(
self.request,
_("%(payment_source)s - transaction NOT confirmed" %
{'payment_source': PRZELEWY24_PL}))
logger.error('%s - transaction NOT confirmed. p24_session_id: %s'
% (LOGGING_PREFIX, post.get('p24_session_id')))
return HttpResponseRedirect(reverse('basket:summary'))
self._save_p24_transaction(attrs_only={
'status': const.P24_STATUS_ACCEPTED_VERIFIED
})
logger.info('%s - transaction verified. p24_session_id: %s' % (
LOGGING_PREFIX, post.get('p24_session_id')))
submission = self.build_submission(basket=request.basket)
return self.submit(**submission)
def handle_payment(self, order_number, order_total, **kwargs):
reference = self.p24_transaction.p24_session_id
# Payment successful! Record payment source
source_type, __ = SourceType.objects.get_or_create(
name=PRZELEWY24_PL)
source = Source(
source_type=source_type,
amount_allocated=order_total.incl_tax,
reference=reference)
self.add_payment_source(source)
# Record payment event
self.add_payment_event('pre-auth', order_total.incl_tax)
class Przelewy24RejectPaymentView(Przelewy24MixIn, View):
    required_POST_params = ['p24_order_id', 'p24_kwota', 'p24_crc',
                            'p24_id_sprzedawcy', 'p24_error_code',
                            'p24_order_id_full', 'p24_session_id']
required_model_attrs = ['p24_kwota', 'p24_id_sprzedawcy']
new_attrs_to_set = ['p24_order_id', 'p24_order_id_full', 'p24_error_code']
def post(self, request, *args, **kwargs):
logger.info('P24 - reject view. Basket ID: %s' %
kwargs.get('basket_id'))
basket = get_object_or_404(Basket, id=kwargs['basket_id'],
status=Basket.FROZEN)
basket.thaw()
logger.info('P24 - reject view. POST: %s' % json.dumps(request.POST))
if not self._verify_p24_response():
messages.error(
self.request,
_("%(payment_source)s - transaction incorrect" % {
'payment_source': PRZELEWY24_PL}))
logger.error('%s - transaction FAKE' % (LOGGING_PREFIX,))
self._save_p24_transaction(
attrs_only={'status': const.P24_STATUS_FAKE})
return HttpResponseRedirect(reverse('basket:summary'))
self._save_p24_transaction(
additional_attrs={'status': const.P24_STATUS_REJECTED})
logger.info('%s - transaction rejected. p24_session_id: %s' % (
LOGGING_PREFIX, request.POST['p24_session_id']))
messages.error(
self.request,
_("%(payment_source)s - transaction failed" % {
'payment_source': PRZELEWY24_PL}))
return HttpResponseRedirect(reverse('basket:summary'))
class Przelewy24AcceptDelayedPaymentView(CheckoutSessionMixin, View):
#TODO: to be implemented
pass
|
takis/django
|
refs/heads/master
|
tests/responses/tests.py
|
226
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.test import SimpleTestCase
UTF8 = 'utf-8'
ISO88591 = 'iso-8859-1'
class HttpResponseBaseTests(SimpleTestCase):
def test_closed(self):
r = HttpResponseBase()
self.assertIs(r.closed, False)
r.close()
self.assertIs(r.closed, True)
def test_write(self):
r = HttpResponseBase()
self.assertIs(r.writable(), False)
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.write('asdf')
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.writelines(['asdf\n', 'qwer\n'])
def test_tell(self):
r = HttpResponseBase()
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance cannot tell its position'):
r.tell()
def test_setdefault(self):
"""
HttpResponseBase.setdefault() should not change an existing header
and should be case insensitive.
"""
r = HttpResponseBase()
r['Header'] = 'Value'
r.setdefault('header', 'changed')
self.assertEqual(r['header'], 'Value')
r.setdefault('x-header', 'DefaultValue')
self.assertEqual(r['X-Header'], 'DefaultValue')
class HttpResponseTests(SimpleTestCase):
def test_status_code(self):
resp = HttpResponse(status=503)
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_change_status_code(self):
resp = HttpResponse()
resp.status_code = 503
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_reason_phrase(self):
reason = "I'm an anarchist coffee pot on crack."
resp = HttpResponse(status=814, reason=reason)
self.assertEqual(resp.status_code, 814)
self.assertEqual(resp.reason_phrase, reason)
def test_charset_detection(self):
""" HttpResponse should parse charset from content_type."""
response = HttpResponse('ok')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
self.assertEqual(response['Content-Type'], 'text/html; charset=%s' % ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset="%s"' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(content_type='text/plain')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
def test_response_content_charset(self):
"""HttpResponse should encode based on charset."""
content = "Café :)"
utf8_content = content.encode(UTF8)
iso_content = content.encode(ISO88591)
response = HttpResponse(utf8_content)
self.assertContains(response, utf8_content)
response = HttpResponse(iso_content, content_type='text/plain; charset=%s' % ISO88591)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content, content_type='text/plain')
self.assertContains(response, iso_content)
def test_repr(self):
response = HttpResponse(content="Café :)".encode(UTF8), status=201)
expected = '<HttpResponse status_code=201, "text/html; charset=utf-8">'
self.assertEqual(repr(response), expected)
|
puiterwijk/product-definition-center
|
refs/heads/master
|
pdc/apps/changeset/tests.py
|
4
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from mock import Mock, call, patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from .middleware import ChangesetMiddleware
from .middleware import logger as changeset_logger
class ChangesetMiddlewareTestCase(TestCase):
def setUp(self):
self.cm = ChangesetMiddleware()
self.request = Mock()
setattr(self.request, "method", "POST")
def test_passing_arguments(self):
self.request.user.is_authenticated = lambda: False
self.request.META.get = lambda x, y: y
func = Mock()
func.__name__ = "Mock"
func.return_value = 123
with patch("pdc.apps.changeset.models.Changeset") as changeset:
ret = self.cm.process_view(self.request, func, [1, 2, 3], {'arg': 'val'})
self.assertTrue(func.called)
self.assertEqual(ret, 123)
self.assertEqual(func.call_args, call(self.request, 1, 2, 3, arg='val'))
self.assertEqual(changeset.mock_calls, [call(author=None, comment=None), call().commit()])
def test_no_commit_with_exception(self):
self.request.user.is_authenticated = lambda: False
self.request.META.get = lambda x, y: y
func = Mock()
func.__name__ = "Mock"
func.side_effect = Exception("Boom!")
changeset_logger.error = Mock()
with patch("pdc.apps.changeset.models.Changeset") as changeset:
self.assertRaises(Exception, self.cm.process_view, self.request, func, [], {})
self.assertTrue(func.called)
self.assertEqual(changeset.mock_calls, [call(author=None, comment=None)])
self.assertTrue(changeset_logger.error.called)
class ChangesetRESTTestCase(APITestCase):
fixtures = ['pdc/apps/changeset/fixtures/tests/changeset.json',
"pdc/apps/component/fixtures/tests/bugzilla_component.json"]
def test_get(self):
url = reverse('changeset-detail', args=[1])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data.get('changes')), 2)
self.assertEqual(response.data.get("id"), 1)
def test_list_order(self):
url = reverse('changeset-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = response.data.get('results')
self.assertTrue(results[0].get('committed_on') > results[1].get('committed_on'))
def test_query(self):
url = reverse('changeset-list')
response = self.client.get(url + '?resource=contact', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_with_multiple_values(self):
url = reverse('changeset-list')
response = self.client.get(url + '?resource=contact&resource=person', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 2)
def test_query_with_wrong_resource(self):
url = reverse('changeset-list')
response = self.client.get(url + '?resource=nonexists', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_with_correct_datetimeformat(self):
url = reverse('changeset-list')
response = self.client.get(url + '?changed_since=2015-02-03T02:55:18', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_with_incorrect_datetimeformat(self):
url = reverse('changeset-list')
response = self.client.get(url + '?changed_since=20150203T02:55:18', format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_query_with_pdc_change_comment(self):
url = reverse('changeset-list')
response = self.client.get(url + '?comment=change', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_create_with_pdc_change_comment(self):
url = reverse('bugzillacomponent-list')
data = {'name': 'bin', 'parent_pk': 1}
extra = {'HTTP_PDC_CHANGE_COMMENT': 'New bugzilla component'}
response = self.client.post(url, data, format='json', **extra)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
url1 = reverse('changeset-list')
response1 = self.client.get(url1 + '?comment=new', format='json')
self.assertEqual(response1.status_code, status.HTTP_200_OK)
self.assertEqual(response1.data['count'], 1)
def test_bulk_create_with_pdc_change_comment(self):
url = reverse('bugzillacomponent-list')
data = [{'name': 'bin', 'parent_pk': 1}, {'name': 'bin1', 'parent_pk': 2}]
extra = {'HTTP_PDC_CHANGE_COMMENT': 'New bugzilla components'}
response = self.client.post(url, data, format='json', **extra)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
url1 = reverse('changeset-list')
response1 = self.client.get(url1 + '?comment=components', format='json')
self.assertEqual(response1.status_code, status.HTTP_200_OK)
self.assertEqual(response1.data['count'], 1)
self.assertEqual(len(response1.data.get('results')[0].get('changes')), 2)
|
MobileWebApps/backend-python-rest-gae
|
refs/heads/master
|
lib/markdown/extensions/toc.py
|
19
|
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree, parseBoolValue, AMP_SUBSTITUTE
from .headerid import slugify, unique, itertext, stashedHTML2text
import re
def order_toc_list(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
if not remaining_list:
return [], []
current = remaining_list.pop(0)
if not 'children' in current.keys():
current['children'] = []
if not prev_elements:
# This happens for instance with [8, 1, 1], ie. when some
# header level is outside a scope. We treat it as a
# top-level
next_elements, children = build_correct(remaining_list, [current])
current['children'].append(children)
return [current] + next_elements, []
prev_element = prev_elements.pop()
children = []
next_elements = []
# Is current part of the child list or next list?
if current['level'] > prev_element['level']:
#print "%d is a child of %d" % (current['level'], prev_element['level'])
prev_elements.append(prev_element)
prev_elements.append(current)
prev_element['children'].append(current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children += children2
next_elements += next_elements2
else:
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements:
#print "No previous elements, so appending to the next set"
next_elements.append(current)
prev_elements = [current]
next_elements2, children2 = build_correct(remaining_list, prev_elements)
current['children'].extend(children2)
else:
#print "Previous elements, comparing to those first"
remaining_list.insert(0, current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children.extend(children2)
next_elements += next_elements2
return next_elements, children
ordered_list, __ = build_correct(toc_list)
return ordered_list
class TocTreeprocessor(Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def add_anchor(self, c, elem_id): #@ReservedAssignment
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c.getchildren():
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def add_permalink(self, c, elem_id):
permalink = etree.Element("a")
permalink.text = ("%spara;" % AMP_SUBSTITUTE
if self.use_permalinks is True else self.use_permalinks)
permalink.attrib["href"] = "#" + elem_id
permalink.attrib["class"] = "headerlink"
permalink.attrib["title"] = "Permanent link"
c.append(permalink)
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
header_rgx = re.compile("[Hh][123456]")
self.use_anchors = parseBoolValue(self.config["anchorlink"])
self.use_permalinks = parseBoolValue(self.config["permalink"], False)
if self.use_permalinks is None:
self.use_permalinks = self.config["permalink"]
# Get a list of id attributes
used_ids = set()
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.add(c.attrib["id"])
toc_list = []
marker_found = False
for (p, c) in self.iterparent(doc):
text = ''.join(itertext(c)).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside the previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = stashedHTML2text(text, self.markdown)
elem_id = unique(self.config["slugify"](elem_id, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1])
toc_list.append({'level': tag_level,
'id': elem_id,
'name': text})
if self.use_anchors:
self.add_anchor(c, elem_id)
if self.use_permalinks:
self.add_permalink(c, elem_id)
toc_list_nested = order_toc_list(toc_list)
self.build_toc_etree(div, toc_list_nested)
prettify = self.markdown.treeprocessors.get('prettify')
if prettify: prettify.run(div)
if not marker_found:
# serialize and attach to markdown instance.
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"],
"permalink" : [0,
"1 or link text if a Sphinx-style permalink should be added",
"Defaults to 0"]
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md)
tocext.config = self.getConfigs()
# Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(configs={}):
return TocExtension(configs=configs)
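# Illustrative usage sketch (assumes the Markdown 2.x API this extension
# targets; the input text is an example only):
#
#     import markdown
#     html = markdown.markdown(
#         "[TOC]\n\n# Heading\n\n## Subheading",
#         extensions=[TocExtension(configs=[('title', 'Contents')])])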
|
jobscore/sync-engine
|
refs/heads/master
|
inbox/models/mixins.py
|
2
|
import abc
from datetime import datetime
from sqlalchemy import Column, DateTime, String, inspect, Boolean, sql, func
from sqlalchemy.ext.hybrid import hybrid_property, Comparator
from inbox.sqlalchemy_ext.util import Base36UID, generate_public_id, ABCMixin
from inbox.models.constants import MAX_INDEXABLE_LENGTH
from inbox.util.addr import canonicalize_address
from inbox.util.encoding import unicode_safe_truncate
class HasRevisions(ABCMixin):
"""Mixin for tables that should be versioned in the transaction log."""
@property
def versioned_relationships(self):
"""
        May be overridden by subclasses. This should be the list of
relationship attribute names that should trigger an update revision
when changed. (We want to version changes to some, but not all,
relationship attributes.)
"""
return []
@property
def propagated_attributes(self):
"""
May be overridden by subclasses. This is the list of attribute names
that should trigger an update revision for a /related/ object -
for example, when a message's `is_read` or `categories` is changed,
we want an update revision created for the message's thread as well.
Such manual propagation is required because changes to related objects
are not reflected in the related attribute's history, only additions
and deletions are. For example, thread.messages.history will
not reflect a change made to one of the thread's messages.
"""
return []
@property
def should_suppress_transaction_creation(self):
"""
May be overridden by subclasses. We don't want to version certain
specific objects - for example, Block instances that are just raw
message parts and not real attachments. Use this property to suppress
revisions of such objects. (The need for this is really an artifact of
current deficiencies in our models. We should be able to get rid of it
eventually.)
"""
return False
# Must be defined by subclasses
API_OBJECT_NAME = abc.abstractproperty()
def has_versioned_changes(self):
"""
Return True if the object has changes on any of its column properties
or any relationship attributes named in self.versioned_relationships,
or has been manually marked as dirty (the special 'dirty' instance
attribute is set to True).
"""
obj_state = inspect(self)
versioned_attribute_names = list(self.versioned_relationships)
for mapper in obj_state.mapper.iterate_to_root():
for attr in mapper.column_attrs:
versioned_attribute_names.append(attr.key)
for attr_name in versioned_attribute_names:
if getattr(obj_state.attrs, attr_name).history.has_changes():
return True
return False
class HasPublicID(object):
public_id = Column(Base36UID, nullable=False,
index=True, default=generate_public_id)
class AddressComparator(Comparator):
def __eq__(self, other):
return self.__clause_element__() == canonicalize_address(other)
def like(self, term, escape=None):
return self.__clause_element__().like(term, escape=escape)
def in_(self, addresses):
return self.__clause_element__().in_(map(canonicalize_address, addresses))
class CaseInsensitiveComparator(Comparator):
def __eq__(self, other):
return func.lower(self.__clause_element__()) == func.lower(other)
class HasEmailAddress(object):
"""
Provides an email_address attribute, which returns as value whatever you
set it to, but uses a canonicalized form for comparisons. So e.g.
db_session.query(Account).filter_by(
email_address='ben.bitdiddle@gmail.com').all()
and
db_session.query(Account).filter_by(
email_address='ben.bitdiddle@gmail.com').all()
will return the same results, because the two Gmail addresses are
equivalent.
"""
_raw_address = Column(String(MAX_INDEXABLE_LENGTH),
nullable=True, index=True)
_canonicalized_address = Column(String(MAX_INDEXABLE_LENGTH),
nullable=True, index=True)
@hybrid_property
def email_address(self):
return self._raw_address
@email_address.comparator
def email_address(cls):
return AddressComparator(cls._canonicalized_address)
@email_address.setter
def email_address(self, value):
# Silently truncate if necessary. In practice, this may be too
# long if somebody put a super-long email into their contacts by
# mistake or something.
if value is not None:
value = unicode_safe_truncate(value, MAX_INDEXABLE_LENGTH)
self._raw_address = value
self._canonicalized_address = canonicalize_address(value)
class CreatedAtMixin(object):
created_at = Column(DateTime, default=datetime.utcnow,
nullable=False, index=True)
class UpdatedAtMixin(object):
updated_at = Column(DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow, nullable=False, index=True)
class DeletedAtMixin(object):
deleted_at = Column(DateTime, nullable=True, index=True)
class HasRunState(ABCMixin):
# Track whether this object (e.g. folder, account) should be running
# or not. Used to compare against reported data points to see if all is
# well.
# Is sync enabled for this object? The sync_enabled property should be
# a Boolean that reflects whether the object should be reporting
# a heartbeat. For folder-level objects, this property can be used to
# combine local run state with the parent account's state, so we don't
# need to cascade account-level start/stop status updates down to folders.
sync_enabled = abc.abstractproperty()
# Database-level tracking of whether the sync should be running.
sync_should_run = Column(Boolean, default=True, nullable=False,
server_default=sql.expression.true())
|
Khan/tinyquery
|
refs/heads/master
|
tinyquery/api_client_test.py
|
1
|
from __future__ import absolute_import
import unittest
from tinyquery import api_client
from tinyquery import tq_types
from tinyquery import tinyquery
class ApiClientTest(unittest.TestCase):
def setUp(self):
self.tinyquery = tinyquery.TinyQuery()
self.tq_service = api_client.TinyQueryApiClient(self.tinyquery)
@staticmethod
def table_ref(table_name):
return {
'projectId': 'test_project',
'datasetId': 'test_dataset',
'tableId': table_name,
}
def insert_simple_table(self):
self.tq_service.tables().insert(
projectId='test_project',
datasetId='test_dataset',
body={
'tableReference': self.table_ref('test_table'),
'schema': {
'fields': [
{'name': 'foo', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'bar', 'type': 'BOOLEAN', 'mode': 'NULLABLE'},
]
}
}).execute()
def run_query(self, query):
query_job_info = self.tq_service.jobs().insert(
projectId='test_project',
body={
'projectId': 'test_project',
'configuration': {
'query': {
'query': query
}
}
}
).execute()
query_result = self.tq_service.jobs().getQueryResults(
projectId='test_project',
jobId=query_job_info['jobReference']['jobId']
).execute()
return query_result
def query_to_table(self, query, dest_dataset, dest_table):
self.tq_service.jobs().insert(
projectId='test_project',
body={
'projectId': 'test_project',
'configuration': {
'query': {
'query': query,
'destinationTable': {
'projectId': 'test_project',
'datasetId': dest_dataset,
'tableId': dest_table
}
},
}
}
).execute()
# Normally, we'd need to block until it's complete, but in tinyquery we
# know that the query gets executed immediately.
def test_table_management(self):
self.insert_simple_table()
table_info = self.tq_service.tables().get(
projectId='test_project', datasetId='test_dataset',
tableId='test_table').execute()
self.assertEqual(
{'name': 'bar', 'type': 'BOOLEAN', 'mode': 'NULLABLE'},
table_info['schema']['fields'][1])
self.tq_service.tables().delete(
projectId='test_project', datasetId='test_dataset',
tableId='test_table').execute()
try:
self.tq_service.tables().get(
projectId='test_project', datasetId='test_dataset',
tableId='test_table').execute()
self.fail('Expected exception to be raised.')
except api_client.FakeHttpError as e:
self.assertTrue('404' in e.content)
try:
self.tq_service.tables().delete(
projectId='test_project', datasetId='test_dataset',
tableId='test_table').execute()
self.fail('Expected exception to be raised.')
except api_client.FakeHttpError as e:
self.assertTrue('404' in e.content)
def test_full_job_query(self):
job_info = self.tq_service.jobs().insert(
projectId='test_project',
body={
'projectId': 'test_project',
'configuration': {
'query': {
'query': 'SELECT 7 as foo',
}
}
}
).execute()
query_result = self.tq_service.jobs().getQueryResults(
projectId='test_project', jobId=job_info['jobReference']['jobId']
).execute()
self.assertEqual('7', query_result['rows'][0]['f'][0]['v'])
self.assertEqual(
{'name': 'foo', 'type': tq_types.INT},
query_result['schema']['fields'][0])
def test_sync_query(self):
# As a convenience, BigQuery also makes it possible to run a query
# synchronously in a single API request.
query_result = self.tq_service.jobs().query(
projectId='test_project',
body={
'query': 'SELECT 7 as foo',
}
).execute()
self.assertEqual('7', query_result['rows'][0]['f'][0]['v'])
def test_table_copy(self):
self.tq_service.jobs().insert(
projectId='test_project',
body={
'projectId': 'test_project',
'configuration': {
'query': {
'query': 'SELECT 7 as foo',
'destinationTable': self.table_ref('table1')
},
},
}
).execute()
for _ in range(5):
self.tq_service.jobs().insert(
projectId='test_project',
body={
'projectId': 'test_project',
'configuration': {
'copy': {
'sourceTable': self.table_ref('table1'),
'destinationTable': self.table_ref('table2'),
'createDisposition': 'CREATE_IF_NEEDED',
'writeDisposition': 'WRITE_APPEND',
}
}
}
).execute()
query_result = self.run_query('SELECT foo FROM test_dataset.table2')
self.assertEqual(5, len(query_result['rows']))
def test_patch(self):
self.insert_simple_table()
# Should not crash. TODO: Allow the new expiration time to be read.
self.tq_service.tables().patch(
projectId='test_project',
datasetId='test_dataset',
tableId='test_table',
body={
'expirationTime': 1000000000
}
).execute()
def test_create_and_query_view(self):
self.insert_simple_table()
self.tq_service.tables().insert(
projectId='test_project',
datasetId='test_dataset',
body={
'tableReference': self.table_ref('test_view'),
'view': {
'query': 'SELECT COUNT(*) AS num_rows '
'FROM test_dataset.test_table'
}
}
).execute()
# Test regular field selection.
query_result = self.run_query('SELECT num_rows '
'FROM test_dataset.test_view')
self.assertEqual('0', query_result['rows'][0]['f'][0]['v'])
# Test field selection with a table alias.
query_result = self.run_query('SELECT t.num_rows '
'FROM test_dataset.test_view t')
self.assertEqual('0', query_result['rows'][0]['f'][0]['v'])
# Test field selection with the fully-qualified name.
query_result = self.run_query('SELECT test_dataset.test_view.num_rows '
'FROM test_dataset.test_view')
self.assertEqual('0', query_result['rows'][0]['f'][0]['v'])
def test_list_tables(self):
self.insert_simple_table()
self.tq_service.jobs().insert(
projectId='test_project',
body={
'projectId': 'test_project',
'configuration': {
'query': {
'query': 'SELECT 7 as foo',
'destinationTable': self.table_ref('another_table')
},
}
}
).execute()
response = self.tq_service.tables().list(
projectId='test_project',
datasetId='test_dataset',
pageToken=None,
maxResults=5
).execute()
table_list = response['tables']
self.assertEqual(2, len(table_list))
self.assertEqual('another_table',
table_list[0]['tableReference']['tableId'])
self.assertEqual('test_table',
table_list[1]['tableReference']['tableId'])
def test_list_tabledata(self):
self.query_to_table(
"""
SELECT * FROM
(SELECT 0 AS foo, 'hello' AS bar),
(SELECT 7 AS foo, 'goodbye' AS bar)
""",
'test_dataset', 'test_table_2')
list_response = self.tq_service.tabledata().list(
projectId='test_project', datasetId='test_dataset',
tableId='test_table_2').execute()
self.assertEqual('0', list_response['rows'][0]['f'][0]['v'])
self.assertEqual('hello', list_response['rows'][0]['f'][1]['v'])
self.assertEqual('7', list_response['rows'][1]['f'][0]['v'])
self.assertEqual('goodbye', list_response['rows'][1]['f'][1]['v'])
|
vmthunder/nova
|
refs/heads/master
|
nova/network/security_group/openstack_driver.py
|
5
|
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo.utils import importutils
security_group_opts = [
cfg.StrOpt('security_group_api',
default='nova',
help='The full class name of the security API class'),
]
CONF = cfg.CONF
CONF.register_opts(security_group_opts)
NOVA_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
'NativeNovaSecurityGroupAPI')
NEUTRON_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
'NativeNeutronSecurityGroupAPI')
def get_openstack_security_group_driver():
if CONF.security_group_api.lower() == 'nova':
return importutils.import_object(NOVA_DRIVER)
elif CONF.security_group_api.lower() in ('neutron', 'quantum'):
return importutils.import_object(NEUTRON_DRIVER)
else:
return importutils.import_object(CONF.security_group_api)
def is_neutron_security_groups():
return CONF.security_group_api.lower() in ('neutron', 'quantum')
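# Illustrative note: the driver is chosen via the security_group_api option,
# typically set in nova.conf (value shown is an example only):
#
#     [DEFAULT]
#     security_group_api = neutron
#
# Any other value is treated as a full class path and imported as-is.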
|
Superjom/models-1
|
refs/heads/develop
|
fluid/image_classification/mobilenet.py
|
1
|
import os
import paddle.v2 as paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
parameter_attr = ParamAttr(initializer=MSRA())
def conv_bn_layer(input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
act='relu',
use_cudnn=True):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def depthwise_separable(input, num_filters1, num_filters2, num_groups, stride,
scale):
"""
"""
depthwise_conv = conv_bn_layer(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False)
pointwise_conv = conv_bn_layer(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0)
return pointwise_conv
def mobile_net(img, class_dim, scale=1.0):
# conv1: 112x112
tmp = conv_bn_layer(
img,
filter_size=3,
channels=3,
num_filters=int(32 * scale),
stride=2,
padding=1)
# 56x56
tmp = depthwise_separable(
tmp,
num_filters1=32,
num_filters2=64,
num_groups=32,
stride=1,
scale=scale)
tmp = depthwise_separable(
tmp,
num_filters1=64,
num_filters2=128,
num_groups=64,
stride=2,
scale=scale)
# 28x28
tmp = depthwise_separable(
tmp,
num_filters1=128,
num_filters2=128,
num_groups=128,
stride=1,
scale=scale)
tmp = depthwise_separable(
tmp,
num_filters1=128,
num_filters2=256,
num_groups=128,
stride=2,
scale=scale)
# 14x14
tmp = depthwise_separable(
tmp,
num_filters1=256,
num_filters2=256,
num_groups=256,
stride=1,
scale=scale)
tmp = depthwise_separable(
tmp,
num_filters1=256,
num_filters2=512,
num_groups=256,
stride=2,
scale=scale)
# 14x14
for i in range(5):
tmp = depthwise_separable(
tmp,
num_filters1=512,
num_filters2=512,
num_groups=512,
stride=1,
scale=scale)
# 7x7
tmp = depthwise_separable(
tmp,
num_filters1=512,
num_filters2=1024,
num_groups=512,
stride=2,
scale=scale)
tmp = depthwise_separable(
tmp,
num_filters1=1024,
num_filters2=1024,
num_groups=1024,
stride=1,
scale=scale)
tmp = fluid.layers.pool2d(
input=tmp,
pool_size=0,
pool_stride=1,
pool_type='avg',
global_pooling=True)
tmp = fluid.layers.fc(input=tmp,
size=class_dim,
act='softmax',
param_attr=parameter_attr)
return tmp
def train(learning_rate, batch_size, num_passes, model_save_dir='model'):
class_dim = 102
image_shape = [3, 224, 224]
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
out = mobile_net(image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(5 * 1e-5))
opts = optimizer.minimize(avg_cost)
accuracy = fluid.evaluator.Accuracy(input=out, label=label)
inference_program = fluid.default_main_program().clone()
with fluid.program_guard(inference_program):
test_accuracy = fluid.evaluator.Accuracy(input=out, label=label)
test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states
inference_program = fluid.io.get_inference_program(test_target)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
train_reader = paddle.batch(
paddle.dataset.flowers.train(), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.flowers.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
for pass_id in range(num_passes):
accuracy.reset(exe)
for batch_id, data in enumerate(train_reader()):
loss, acc = exe.run(fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost] + accuracy.metrics)
print("Pass {0}, batch {1}, loss {2}, acc {3}".format(
pass_id, batch_id, loss[0], acc[0]))
pass_acc = accuracy.eval(exe)
test_accuracy.reset(exe)
for data in test_reader():
loss, acc = exe.run(inference_program,
feed=feeder.feed(data),
fetch_list=[avg_cost] + test_accuracy.metrics)
test_pass_acc = test_accuracy.eval(exe)
print("End pass {0}, train_acc {1}, test_acc {2}".format(
pass_id, pass_acc, test_pass_acc))
if pass_id % 10 == 0:
model_path = os.path.join(model_save_dir, str(pass_id))
            print("save models to %s" % model_path)
fluid.io.save_inference_model(model_path, ['image'], [out], exe)
if __name__ == '__main__':
train(learning_rate=0.005, batch_size=40, num_passes=300)
|
carlitosvi/django-jquery-file-upload
|
refs/heads/master
|
fileupload/templatetags/upload_tags.py
|
14
|
from django import template
register = template.Library()
@register.simple_tag
def upload_js():
return """
<!-- The template to display files available for upload -->
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade">
<td class="preview"><span class="fade"></span></td>
<td class="name"><span>{%=file.name%}</span></td>
<td class="size"><span>{%=o.formatFileSize(file.size)%}</span></td>
{% if (file.error) { %}
<td class="error" colspan="2"><span class="label label-important">{%=locale.fileupload.error%}</span> {%=locale.fileupload.errors[file.error] || file.error%}</td>
{% } else if (o.files.valid && !i) { %}
<td>
<div class="progress progress-success progress-striped active"><div class="bar" style="width:0%;"></div></div>
</td>
<td class="start">{% if (!o.options.autoUpload) { %}
<button class="btn btn-success">
<i class="icon-upload icon-white"></i>
<span>{%=locale.fileupload.start%}</span>
</button>
{% } %}</td>
{% } else { %}
<td colspan="2"></td>
{% } %}
<td class="cancel">{% if (!i) { %}
<button class="btn btn-warning">
<i class="icon-ban-circle icon-white"></i>
<span>{%=locale.fileupload.cancel%}</span>
</button>
{% } %}</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
{% if (file.error) { %}
<td></td>
<td class="name"><span>{%=file.name%}</span></td>
<td class="size"><span>{%=o.formatFileSize(file.size)%}</span></td>
<td class="error" colspan="2"><span class="label label-important">{%=locale.fileupload.error%}</span> {%=locale.fileupload.errors[file.error] || file.error%}</td>
{% } else { %}
<td class="preview">{% if (file.thumbnail_url) { %}
<a href="{%=file.url%}" title="{%=file.name%}" rel="gallery" download="{%=file.name%}"><img src="{%=file.thumbnail_url%}"></a>
{% } %}</td>
<td class="name">
<a href="{%=file.url%}" title="{%=file.name%}" rel="{%=file.thumbnail_url&&'gallery'%}" download="{%=file.name%}">{%=file.name%}</a>
</td>
<td class="size"><span>{%=o.formatFileSize(file.size)%}</span></td>
<td colspan="2"></td>
{% } %}
<td class="delete">
<button class="btn btn-danger" data-type="{%=file.delete_type%}" data-url="{%=file.delete_url%}">
<i class="icon-trash icon-white"></i>
<span>{%=locale.fileupload.destroy%}</span>
</button>
<input type="checkbox" name="delete" value="1">
</td>
</tr>
{% } %}
</script>
"""
|
guyb17/class1
|
refs/heads/master
|
ex10_parse-nonAES.py
|
1
|
#! /usr/bin/env python
from ciscoconfparse import CiscoConfParse
c_cfg = CiscoConfParse("cisco_ipsec.txt")
cryp_map = c_cfg.find_objects_wo_child(parentspec=r"^crypto map CRYPTO", childspec=r"set transform-set AES-SHA")
for element in cryp_map:
print element.text
for child in element.children:
print child.text
|
un33k/CouchPotatoServer
|
refs/heads/master
|
libs/pyutil/scripts/memdump2dot.py
|
106
|
#!/usr/bin/env python
import bindann
bindann.install_exception_handler()
import sys
inf = open(sys.argv[1], "r")
outf = open(sys.argv[1]+".dot", "w")
outf.write("digraph %s {\n" % sys.argv[1].replace(".",""))
def parse_netstring(l, i):
try:
j = l.find(':', i)
if j == -1:
return (None, len(l),)
lenval = int(l[i:j])
val = l[j+1:j+1+lenval]
# skip the comma
assert l[j+1+lenval] == ","
return (val, j+1+lenval+1,)
except Exception, le:
le.args = tuple(le.args + (l, i,))
raise
def parse_ref(l, i):
(attrname, i,) = parse_netstring(l, i)
j = l.find(",", i)
assert j != -1
objid = l[i:j]
return (objid, attrname, j+1,)
def parse_memdump_line(l):
result = []
i = l.find('-')
objid = l[:i]
(objdesc, i,) = parse_netstring(l, i+1)
result.append((objid, objdesc,))
while i != -1 and i < len(l):
(objid, attrname, i,) = parse_ref(l, i)
result.append((objid, attrname,))
return result
for l in inf:
if l[-1] != "\n":
raise "waht the HECK? %r" % l
res = parse_memdump_line(l.strip())
# declare the node
outf.write("\"%s\" [label=\"%s\"];\n" % (res[0][0], res[0][1],))
# declare all the edges
for edge in res[1:]:
if edge[1]:
# a named edge
outf.write("\"%s\" -> \"%s\" [style=bold, label=\"%s\"];\n" % (res[0][0], edge[0], edge[1],))
else:
# an anonymous edge
outf.write("\"%s\" -> \"%s\";\n" % (res[0][0], edge[0]))
outf.write("}")
|