| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
RoboPi-CSEDU/rupai | refs/heads/master | Adafruit_Python_SSD1306/emotion/emotion.py | 1 |
import math
import time
import os
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# Raspberry Pi pin configuration:
RST = 24
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware SPI:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Initialize library.
disp.begin()
# Get display width and height.
width = disp.width
height = disp.height
# Clear display.
disp.clear()
disp.display()
def blink():
    # Load the five animation frames as 1-bit (monochrome) images.
    ims = []
    for i in range(1, 6):
        ims.append(Image.open("blink/%d.png" % i).convert('1'))
    while True:
        time.sleep(2.0)
        # Play the blink animation frame by frame.
        for count in range(5):
            disp.image(ims[count])
            disp.display()
            time.sleep(0.05)

blink()
|
yfried/ansible | refs/heads/devel | lib/ansible/modules/system/pamd.py | 11 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: pamd
author:
- "Kenneth D. Evensen (@kevensen)"
short_description: Manage PAM Modules
description:
- Edit PAM service's type, control, module path and module arguments.
In order for a PAM rule to be modified, the type, control and
module_path must match an existing rule. See man(5) pam.d for details.
version_added: "2.3"
options:
name:
required: true
description:
- The name generally refers to the PAM service file to
change, for example system-auth.
type:
required: true
description:
- The type of the PAM rule being modified. The type, control
and module_path all must match a rule to be modified.
control:
required: true
description:
- The control of the PAM rule being modified. This may be a
complicated control with brackets. If this is the case, be
sure to put "[bracketed controls]" in quotes. The type,
control and module_path all must match a rule to be modified.
module_path:
required: true
description:
- The module path of the PAM rule being modified. The type,
control and module_path all must match a rule to be modified.
new_type:
description:
- The new type to assign to the new rule.
new_control:
description:
- The new control to assign to the new rule.
new_module_path:
description:
- The new module path to be assigned to the new rule.
module_arguments:
description:
- When state is 'updated', the module_arguments will replace existing
module_arguments. When state is 'args_absent' args matching those
listed in module_arguments will be removed. When state is
'args_present' any args listed in module_arguments are added if
missing from the existing rule. Furthermore, if the module argument
takes a value denoted by '=', the value will be changed to that specified
in module_arguments. Note that module_arguments is a list. Please see
the examples for usage.
state:
default: updated
choices:
- updated
- before
- after
- args_present
- args_absent
- absent
description:
- The default of 'updated' will modify an existing rule if type,
control and module_path all match an existing rule. With 'before',
the new rule will be inserted before a rule matching type, control
and module_path. Similarly, with 'after', the new rule will be inserted
after an existing rule matching type, control and module_path. With
either 'before' or 'after' new_type, new_control, and new_module_path
must all be specified. If state is 'args_absent' or 'args_present',
new_type, new_control, and new_module_path will be ignored. State
'absent' will remove the rule; it is only available in Ansible 2.4
and later.
path:
default: /etc/pam.d/
description:
- This is the path to the PAM service files.
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
version_added: '2.6'
"""
EXAMPLES = """
- name: Update pamd rule's control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_control: sufficient
- name: Update pamd rule's complex control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
new_control: '[success=2 default=ignore]'
- name: Insert a new rule before an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_type: auth
new_control: sufficient
new_module_path: pam_faillock.so
state: before
- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \
existing rule pam_rootok.so
pamd:
name: su
type: auth
control: sufficient
module_path: pam_rootok.so
new_type: auth
new_control: required
new_module_path: pam_wheel.so
module_arguments: 'use_uid'
state: after
- name: Remove module arguments from an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: ''
state: updated
- name: Replace all module arguments in an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'preauth
silent
deny=3
unlock_time=604800
fail_interval=900'
state: updated
- name: Remove specific arguments from a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_absent
- name: Ensure specific arguments are present in a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_present
- name: Ensure specific arguments are present in a rule (alternative)
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments:
- crond
- quiet
state: args_present
- name: Module arguments requiring commas must be listed as a YAML list
pamd:
name: special-module
type: account
control: required
module_path: pam_access.so
module_arguments:
- listsep=,
state: args_present
- name: Update specific argument value in a rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'fail_interval=300'
state: args_present
- name: Add pam common-auth rule for duo
pamd:
name: common-auth
new_type: auth
new_control: '[success=1 default=ignore]'
new_module_path: '/lib64/security/pam_duo.so'
state: after
type: auth
module_path: pam_sss.so
control: 'requisite'
"""
RETURN = '''
change_count:
description: How many rules were changed
type: int
sample: 1
returned: success
version_added: 2.4
new_rule:
description: The changes to the rule. This was available in Ansible versions 2.4 and 2.5. It was removed in 2.6.
type: string
sample: None None None sha512 shadow try_first_pass use_authtok
returned: success
version_added: 2.4
updated_rule_(n):
description: The rule(s) that was/were changed. This is only available in
Ansible version 2.4 and was removed in 2.5.
type: string
sample:
- password sufficient pam_unix.so sha512 shadow try_first_pass
use_authtok
returned: success
version_added: 2.4
action:
description:
- "That action that was taken and is one of: update_rule,
insert_before_rule, insert_after_rule, args_present, args_absent,
absent. This was available in Ansible version 2.4 and removed in 2.8"
returned: always
type: string
sample: "update_rule"
version_added: 2.4
dest:
description:
- "Path to pam.d service that was changed. This is only available in
Ansible version 2.3 and was removed in 2.4."
returned: success
type: string
sample: "/etc/pam.d/system-auth"
backupdest:
description:
- "The file name of the backup file, if created."
returned: success
type: string
version_added: 2.6
...
'''
from ansible.module_utils.basic import AnsibleModule
import os
import re
from tempfile import NamedTemporaryFile
from datetime import datetime
RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
(?P<control>\[.*\]|\S*)\s+
(?P<path>\S*)\s*
(?P<args>.*)\s*""", re.X)
RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
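# Illustrative (not part of the original module): RULE_REGEX splits a pam.d
# line into its four fields, e.g.
#   m = RULE_REGEX.search('auth required pam_unix.so nullok try_first_pass')
#   m.group('rule_type') -> 'auth'
#   m.group('control')   -> 'required'
#   m.group('path')      -> 'pam_unix.so'
#   m.group('args')      -> 'nullok try_first_pass'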
class PamdLine(object):
def __init__(self, line):
self.line = line
self.prev = None
self.next = None
    @property
    def is_valid(self):
        return self.line == ''
def validate(self):
if not self.is_valid:
return False, "Rule is not valid " + self.line
return True, "Rule is valid " + self.line
# Method to check if a rule matches the type, control and path.
def matches(self, rule_type, rule_control, rule_path, rule_args=None):
return False
def __str__(self):
return str(self.line)
class PamdComment(PamdLine):
def __init__(self, line):
super(PamdComment, self).__init__(line)
    @property
    def is_valid(self):
        return self.line.startswith('#')
class PamdInclude(PamdLine):
def __init__(self, line):
super(PamdInclude, self).__init__(line)
    @property
    def is_valid(self):
        return self.line.startswith('@include')
class PamdRule(PamdLine):
valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
'incomplete', 'default']
valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
self._control = None
self._args = None
self.rule_type = rule_type
self.rule_control = rule_control
self.rule_path = rule_path
self.rule_args = rule_args
# Method to check if a rule matches the type, control and path.
def matches(self, rule_type, rule_control, rule_path, rule_args=None):
if (rule_type == self.rule_type and
rule_control == self.rule_control and
rule_path == self.rule_path):
return True
return False
@classmethod
def rule_from_string(cls, line):
rule_match = RULE_REGEX.search(line)
rule_args = parse_module_arguments(rule_match.group('args'))
return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)
def __str__(self):
if self.rule_args:
return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)
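    # Illustrative (not in the original module): str() renders a rule back into
    # pam.d syntax, left-padding the type to 11 characters, e.g.
    #   str(PamdRule('auth', 'required', 'pam_unix.so', ['nullok']))
    #   -> 'auth       required pam_unix.so nullok'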
@property
def rule_control(self):
if isinstance(self._control, list):
return '[' + ' '.join(self._control) + ']'
return self._control
@rule_control.setter
def rule_control(self, control):
if control.startswith('['):
control = control.replace(' = ', '=').replace('[', '').replace(']', '')
self._control = control.split(' ')
else:
self._control = control
@property
def rule_args(self):
if not self._args:
return []
return self._args
@rule_args.setter
def rule_args(self, args):
self._args = parse_module_arguments(args)
@property
def line(self):
return str(self)
@classmethod
def is_action_unsigned_int(cls, string_num):
number = 0
try:
number = int(string_num)
except ValueError:
return False
if number >= 0:
return True
return False
@property
def is_valid(self):
return self.validate()[0]
def validate(self):
# Validate the rule type
if self.rule_type not in VALID_TYPES:
return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
# Validate the rule control
if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
        elif isinstance(self._control, list):
            for control in self._control:
                # Complex controls are value=action pairs; a token without '='
                # cannot be valid, so report it instead of crashing on unpack.
                if "=" not in control:
                    return False, "Rule control, " + control + ", is not valid in rule " + self.line
                value, action = control.split("=", 1)
                if value not in PamdRule.valid_control_values:
                    return False, "Rule control value, " + value + ", is not valid in rule " + self.line
                if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
                    return False, "Rule control action, " + action + ", is not valid in rule " + self.line
# TODO: Validate path
return True, "Rule is valid " + self.line
# PamdService encapsulates an entire service and contains one or more rules,
# stored as a doubly linked list.
class PamdService(object):
def __init__(self, content):
self._head = None
self._tail = None
for line in content.splitlines():
if line.lstrip().startswith('#'):
pamd_line = PamdComment(line)
elif line.lstrip().startswith('@include'):
pamd_line = PamdInclude(line)
elif line == '':
pamd_line = PamdLine(line)
else:
pamd_line = PamdRule.rule_from_string(line)
self.append(pamd_line)
def append(self, pamd_line):
if self._head is None:
self._head = self._tail = pamd_line
else:
pamd_line.prev = self._tail
pamd_line.next = None
self._tail.next = pamd_line
self._tail = pamd_line
    def remove(self, rule_type, rule_control, rule_path):
        current_line = self._head
        changed = 0
        while current_line is not None:
            if current_line.matches(rule_type, rule_control, rule_path):
                # Unlink the node, updating head/tail when the matched rule
                # is at either end of the list (the tail case previously
                # raised AttributeError on current_line.next.prev).
                if current_line.prev is not None:
                    current_line.prev.next = current_line.next
                else:
                    self._head = current_line.next
                if current_line.next is not None:
                    current_line.next.prev = current_line.prev
                else:
                    self._tail = current_line.prev
                changed += 1
            current_line = current_line.next
        return changed
def get(self, rule_type, rule_control, rule_path):
lines = []
current_line = self._head
while current_line is not None:
if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
lines.append(current_line)
current_line = current_line.next
return lines
def has_rule(self, rule_type, rule_control, rule_path):
if self.get(rule_type, rule_control, rule_path):
return True
return False
def update_rule(self, rule_type, rule_control, rule_path,
new_type=None, new_control=None, new_path=None, new_args=None):
# Get a list of rules we want to change
rules_to_find = self.get(rule_type, rule_control, rule_path)
new_args = parse_module_arguments(new_args)
changes = 0
        for current_rule in rules_to_find:
            rule_changed = False
            if new_type and current_rule.rule_type != new_type:
                rule_changed = True
                current_rule.rule_type = new_type
            if new_control and current_rule.rule_control != new_control:
                rule_changed = True
                current_rule.rule_control = new_control
            if new_path and current_rule.rule_path != new_path:
                rule_changed = True
                current_rule.rule_path = new_path
            if new_args and current_rule.rule_args != new_args:
                rule_changed = True
                current_rule.rule_args = new_args
            if rule_changed:
                changes += 1
        return changes
def insert_before(self, rule_type, rule_control, rule_path,
new_type=None, new_control=None, new_path=None, new_args=None):
# Get a list of rules we want to change
rules_to_find = self.get(rule_type, rule_control, rule_path)
changes = 0
# There are two cases to consider.
# 1. The new rule doesn't exist before the existing rule
# 2. The new rule exists
for current_rule in rules_to_find:
# Create a new rule
new_rule = PamdRule(new_type, new_control, new_path, new_args)
# First we'll get the previous rule.
previous_rule = current_rule.prev
# Next we may have to loop backwards if the previous line is a comment. If it
# is, we'll get the previous "rule's" previous.
while previous_rule is not None and isinstance(previous_rule, PamdComment):
previous_rule = previous_rule.prev
# Next we'll see if the previous rule matches what we are trying to insert.
if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
# First set the original previous rule's next to the new_rule
previous_rule.next = new_rule
# Second, set the new_rule's previous to the original previous
new_rule.prev = previous_rule
# Third, set the new rule's next to the current rule
new_rule.next = current_rule
# Fourth, set the current rule's previous to the new_rule
current_rule.prev = new_rule
changes += 1
# Handle the case where it is the first rule in the list.
elif previous_rule is None:
# This is the case where the current rule is not only the first rule
# but the first line as well. So we set the head to the new rule
if current_rule.prev is None:
self._head = new_rule
# This case would occur if the previous line was a comment.
else:
current_rule.prev.next = new_rule
new_rule.prev = current_rule.prev
new_rule.next = current_rule
current_rule.prev = new_rule
changes += 1
return changes
def insert_after(self, rule_type, rule_control, rule_path,
new_type=None, new_control=None, new_path=None, new_args=None):
# Get a list of rules we want to change
rules_to_find = self.get(rule_type, rule_control, rule_path)
changes = 0
# There are two cases to consider.
# 1. The new rule doesn't exist after the existing rule
# 2. The new rule exists
for current_rule in rules_to_find:
# First we'll get the next rule.
next_rule = current_rule.next
# Next we may have to loop forwards if the next line is a comment. If it
# is, we'll get the next "rule's" next.
while next_rule is not None and isinstance(next_rule, PamdComment):
next_rule = next_rule.next
# First we create a new rule
new_rule = PamdRule(new_type, new_control, new_path, new_args)
if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
                # If the next rule doesn't already match, we'll insert our new rule.
                # Second, set the original next rule's previous to the new_rule
next_rule.prev = new_rule
# Third, set the new_rule's next to the original next rule
new_rule.next = next_rule
# Fourth, set the new rule's previous to the current rule
new_rule.prev = current_rule
# Fifth, set the current rule's next to the new_rule
current_rule.next = new_rule
changes += 1
# This is the case where the current_rule is the last in the list
elif next_rule is None:
new_rule.prev = self._tail
new_rule.next = None
self._tail.next = new_rule
self._tail = new_rule
current_rule.next = new_rule
changes += 1
return changes
def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
# Get a list of rules we want to change
rules_to_find = self.get(rule_type, rule_control, rule_path)
args_to_add = parse_module_arguments(args_to_add)
changes = 0
for current_rule in rules_to_find:
rule_changed = False
# create some structures to evaluate the situation
simple_new_args = set()
key_value_new_args = dict()
for arg in args_to_add:
if arg.startswith("["):
continue
elif "=" in arg:
key, value = arg.split("=")
key_value_new_args[key] = value
else:
simple_new_args.add(arg)
key_value_new_args_set = set(key_value_new_args)
simple_current_args = set()
key_value_current_args = dict()
for arg in current_rule.rule_args:
if arg.startswith("["):
continue
elif "=" in arg:
key, value = arg.split("=")
key_value_current_args[key] = value
else:
simple_current_args.add(arg)
key_value_current_args_set = set(key_value_current_args)
new_args_to_add = list()
# Handle new simple arguments
if simple_new_args.difference(simple_current_args):
for arg in simple_new_args.difference(simple_current_args):
new_args_to_add.append(arg)
# Handle new key value arguments
if key_value_new_args_set.difference(key_value_current_args_set):
for key in key_value_new_args_set.difference(key_value_current_args_set):
new_args_to_add.append(key + '=' + key_value_new_args[key])
if new_args_to_add:
current_rule.rule_args += new_args_to_add
rule_changed = True
# Handle existing key value arguments when value is not equal
if key_value_new_args_set.intersection(key_value_current_args_set):
for key in key_value_new_args_set.intersection(key_value_current_args_set):
if key_value_current_args[key] != key_value_new_args[key]:
arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
rule_changed = True
if rule_changed:
changes += 1
return changes
def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
# Get a list of rules we want to change
rules_to_find = self.get(rule_type, rule_control, rule_path)
args_to_remove = parse_module_arguments(args_to_remove)
changes = 0
for current_rule in rules_to_find:
if not args_to_remove:
args_to_remove = []
# Let's check to see if there are any args to remove by finding the intersection
# of the rule's current args and the args_to_remove lists
if not list(set(current_rule.rule_args) & set(args_to_remove)):
continue
# There are args to remove, so we create a list of new_args absent the args
# to remove.
current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
changes += 1
return changes
def validate(self):
current_line = self._head
while current_line is not None:
if not current_line.validate()[0]:
return current_line.validate()
current_line = current_line.next
return True, "Module is valid"
def __str__(self):
lines = []
current_line = self._head
while current_line is not None:
lines.append(str(current_line))
current_line = current_line.next
        # Guard against files with fewer than two lines before refreshing the stamp.
        if len(lines) > 1 and lines[1].startswith("# Updated by Ansible"):
            lines.pop(1)
        lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat())
return '\n'.join(lines) + '\n'
def parse_module_arguments(module_arguments):
# Return empty list if we have no args to parse
if not module_arguments:
return []
elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
return []
if not isinstance(module_arguments, list):
module_arguments = [module_arguments]
parsed_args = list()
    for arg in module_arguments:
        for item in filter(None, RULE_ARG_REGEX.findall(arg)):
            if not item.startswith("["):
                # Normalize spacing around '=' ('key = value' -> 'key=value');
                # the original discarded re.sub's return value, so the
                # normalization never took effect.
                item = re.sub(r"\s*=\s*", "=", item)
            parsed_args.append(item)
return parsed_args
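# Illustrative behavior (not part of the original module):
#   parse_module_arguments('preauth silent deny=3')   -> ['preauth', 'silent', 'deny=3']
#   parse_module_arguments(['crond', 'quiet'])        -> ['crond', 'quiet']
#   parse_module_arguments('[value1=one value2=two]') -> ['[value1=one value2=two]']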
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str'),
type=dict(required=True,
choices=VALID_TYPES),
control=dict(required=True, type='str'),
module_path=dict(required=True, type='str'),
new_type=dict(required=False,
choices=VALID_TYPES),
new_control=dict(required=False, type='str'),
new_module_path=dict(required=False, type='str'),
module_arguments=dict(required=False, type='list'),
state=dict(required=False, default="updated",
choices=['before', 'after', 'updated',
'args_absent', 'args_present', 'absent']),
path=dict(required=False, default='/etc/pam.d', type='str'),
backup=dict(default=False, type='bool')
),
supports_check_mode=True,
required_if=[
("state", "args_present", ["module_arguments"]),
("state", "args_absent", ["module_arguments"]),
("state", "before", ["new_control"]),
("state", "before", ["new_type"]),
("state", "before", ["new_module_path"]),
("state", "after", ["new_control"]),
("state", "after", ["new_type"]),
("state", "after", ["new_module_path"])
]
)
content = str()
fname = os.path.join(module.params["path"], module.params["name"])
# Open the file and read the content or fail
try:
with open(fname, 'r') as service_file_obj:
content = service_file_obj.read()
    except IOError as e:
        # If unable to read the file, fail out
        module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' % (fname, str(e)))
    # Assuming we didn't fail, create the service
service = PamdService(content)
# Set the action
action = module.params['state']
changes = 0
# Take action
if action == 'updated':
changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
module.params['module_arguments'])
elif action == 'before':
changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
module.params['module_arguments'])
elif action == 'after':
changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
module.params['module_arguments'])
elif action == 'args_absent':
changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
module.params['module_arguments'])
elif action == 'args_present':
if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
module.params['module_arguments'])
elif action == 'absent':
changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
valid, msg = service.validate()
# If the module is not valid (meaning one of the rules is invalid), we will fail
if not valid:
module.fail_json(msg=msg)
result = dict(
changed=(changes > 0),
change_count=changes,
backupdest='',
)
# If not check mode and something changed, backup the original if necessary then write out the file or fail
if not module.check_mode and result['changed']:
# First, create a backup if desired.
if module.params['backup']:
result['backupdest'] = module.backup_local(fname)
        try:
            temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
            with open(temp_file.name, 'w') as fd:
                fd.write(str(service))
        except IOError:
            module.fail_json(msg='Unable to create temporary file in %s' % module.tmpdir)
module.atomic_move(temp_file.name, os.path.realpath(fname))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
andykimpe/chromium-test-npapi | refs/heads/master | third_party/bintrees/bintrees/treemixin.py | 156 |
#!/usr/bin/env python
#coding:utf-8
# Author: Mozman
# Purpose: treemixin provides top level functions for binary trees
# Created: 03.05.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
from __future__ import absolute_import
from .walker import Walker
from .treeslice import TreeSlice
class TreeMixin(object):
"""
Abstract-Base-Class for the pure Python Trees: BinaryTree, AVLTree and RBTree
Mixin-Class for the Cython based Trees: FastBinaryTree, FastAVLTree, FastRBTree
The TreeMixin Class
===================
T has to implement following properties
---------------------------------------
count -- get node count
T has to implement following methods
------------------------------------
get_walker(...)
get a tree walker object; provides methods to traverse the tree, see walker.py
insert(...)
insert(key, value) <==> T[key] = value, insert key into T
remove(...)
remove(key) <==> del T[key], remove key from T
clear(...)
T.clear() -> None. Remove all items from T.
Methods defined here
--------------------
* __contains__(k) -> True if T has a key k, else False, O(log(n))
* __delitem__(y) <==> del T[y], del T[s:e], O(log(n))
* __getitem__(y) <==> T[y], T[s:e], O(log(n))
* __iter__() <==> iter(T)
* __len__() <==> len(T), O(1)
* __max__() <==> max(T), get max item (k,v) of T, O(log(n))
* __min__() <==> min(T), get min item (k,v) of T, O(log(n))
* __and__(other) <==> T & other, intersection
* __or__(other) <==> T | other, union
* __sub__(other) <==> T - other, difference
* __xor__(other) <==> T ^ other, symmetric_difference
* __repr__() <==> repr(T)
* __setitem__(k, v) <==> T[k] = v, O(log(n))
* clear() -> None, Remove all items from T, O(n)
* copy() -> a shallow copy of T, O(n*log(n))
* discard(k) -> None, remove k from T, if k is present, O(log(n))
* get(k[,d]) -> T[k] if k in T, else d, O(log(n))
* is_empty() -> True if len(T) == 0, O(1)
* items([reverse]) -> generator for (k, v) items of T, O(n)
* keys([reverse]) -> generator for keys of T, O(n)
* values([reverse]) -> generator for values of T, O(n)
* pop(k[,d]) -> v, remove specified key and return the corresponding value, O(log(n))
* popitem() -> (k, v), remove and return some (key, value) pair as a 2-tuple, O(log(n))
* setdefault(k[,d]) -> T.get(k, d), also set T[k]=d if k not in T, O(log(n))
* update(E) -> None. Update T from dict/iterable E, O(E*log(n))
* foreach(f, [order]) -> visit all nodes of tree and call f(k, v) for each node, O(n)
slicing by keys
* itemslice(s, e) -> generator for (k, v) items of T for s <= key < e, O(n)
* keyslice(s, e) -> generator for keys of T for s <= key < e, O(n)
* valueslice(s, e) -> generator for values of T for s <= key < e, O(n)
* T[s:e] -> TreeSlice object, with keys in range s <= key < e, O(n)
* del T[s:e] -> remove items by key slicing, for s <= key < e, O(n)
if 's' is None or T[:e] TreeSlice/iterator starts with value of min_key()
if 'e' is None or T[s:] TreeSlice/iterator ends with value of max_key()
T[:] is a TreeSlice which represents the whole tree.
TreeSlice is a tree wrapper with a range check; it contains no references
of its own, so deleting items in the associated tree also removes them
from the TreeSlice.
* TreeSlice[k] -> get value for key k, raises KeyError if k does not exist in range s:e
* TreeSlice[s1:e1] -> TreeSlice object, with keys in range s1 <= key < e1
* new lower bound is max(s, s1)
* new upper bound is min(e, e1)
TreeSlice methods:
* items() -> generator for (k, v) items of T, O(n)
* keys() -> generator for keys of T, O(n)
* values() -> generator for values of T, O(n)
* __iter__ <==> keys()
* __repr__ <==> repr(T)
* __contains__(k) -> True if TreeSlice has a key k, else False, O(log(n))
prev/succ operations
* prev_item(key) -> get (k, v) pair, where k is predecessor to key, O(log(n))
* prev_key(key) -> k, get the predecessor of key, O(log(n))
* succ_item(key) -> get (k,v) pair as a 2-tuple, where k is successor to key, O(log(n))
* succ_key(key) -> k, get the successor of key, O(log(n))
* floor_item(key) -> get (k, v) pair, where k is the greatest key less than or equal to key, O(log(n))
* floor_key(key) -> k, get the greatest key less than or equal to key, O(log(n))
* ceiling_item(key) -> get (k, v) pair, where k is the smallest key greater than or equal to key, O(log(n))
* ceiling_key(key) -> k, get the smallest key greater than or equal to key, O(log(n))
Heap methods
* max_item() -> get largest (key, value) pair of T, O(log(n))
* max_key() -> get largest key of T, O(log(n))
* min_item() -> get smallest (key, value) pair of T, O(log(n))
* min_key() -> get smallest key of T, O(log(n))
* pop_min() -> (k, v), remove item with minimum key, O(log(n))
* pop_max() -> (k, v), remove item with maximum key, O(log(n))
* nlargest(i[,pop]) -> get list of i largest items (k, v), O(i*log(n))
* nsmallest(i[,pop]) -> get list of i smallest items (k, v), O(i*log(n))
Set methods (using frozenset)
* intersection(t1, t2, ...) -> Tree with keys *common* to all trees
* union(t1, t2, ...) -> Tree with keys from *either* trees
* difference(t1, t2, ...) -> Tree with keys in T but not any of t1, t2, ...
* symmetric_difference(t1) -> Tree with keys in either T or t1 but not both
* issubset(S) -> True if every element in T is in S
* issuperset(S) -> True if every element in S is in T
* isdisjoint(S) -> True if T has a null intersection with S
Classmethods
* fromkeys(S[,v]) -> New tree with keys from S and values equal to v.
"""
def get_walker(self):
return Walker(self)
def __repr__(self):
""" x.__repr__(...) <==> repr(x) """
tpl = "%s({%s})" % (self.__class__.__name__, '%s')
return tpl % ", ".join( ("%r: %r" % item for item in self.items()) )
def copy(self):
""" T.copy() -> get a shallow copy of T. """
tree = self.__class__()
self.foreach(tree.insert, order=-1)
return tree
__copy__ = copy
def __contains__(self, key):
""" k in T -> True if T has a key k, else False """
try:
self.get_value(key)
return True
except KeyError:
return False
def __len__(self):
""" x.__len__() <==> len(x) """
return self.count
def __min__(self):
""" x.__min__() <==> min(x) """
return self.min_item()
def __max__(self):
""" x.__max__() <==> max(x) """
return self.max_item()
def __and__(self, other):
""" x.__and__(other) <==> self & other """
return self.intersection(other)
def __or__(self, other):
""" x.__or__(other) <==> self | other """
return self.union(other)
def __sub__(self, other):
""" x.__sub__(other) <==> self - other """
return self.difference(other)
def __xor__(self, other):
""" x.__xor__(other) <==> self ^ other """
return self.symmetric_difference(other)
def discard(self, key):
""" x.discard(k) -> None, remove k from T, if k is present """
try:
self.remove(key)
except KeyError:
pass
def __del__(self):
self.clear()
def is_empty(self):
""" x.is_empty() -> False if T contains any items else True"""
return self.count == 0
    def keys(self, reverse=False):
        """ T.keys([reverse]) -> an iterator over the keys of T, in ascending
        order; if reverse is True, iterate in descending order. reverse
        defaults to False.
        """
return ( item[0] for item in self.items(reverse) )
__iter__ = keys
def __reversed__(self):
return self.keys(reverse=True)
    def values(self, reverse=False):
        """ T.values([reverse]) -> an iterator over the values of T, in ascending
        order; if reverse is True, iterate in descending order. reverse defaults
        to False.
        """
return ( item[1] for item in self.items(reverse) )
    def items(self, reverse=False):
        """ T.items([reverse]) -> an iterator over the (key, value) items of T,
        in ascending order; if reverse is True, iterate in descending order.
        reverse defaults to False.
        """
if self.is_empty():
return []
def default_iterator(node):
direction = 1 if reverse else 0
other = 1 - direction
go_down = True
while True:
if node.has_child(direction) and go_down:
node.push()
node.down(direction)
else:
yield node.item
if node.has_child(other):
node.down(other)
go_down = True
else:
if node.stack_is_empty():
return # all done
node.pop()
go_down = False
treewalker = self.get_walker()
try: # specialized iterators
if reverse:
return treewalker.iter_items_backward()
else:
return treewalker.iter_items_forward()
except AttributeError:
return default_iterator(treewalker)
def __getitem__(self, key):
""" x.__getitem__(y) <==> x[y] """
if isinstance(key, slice):
return TreeSlice(self, key.start, key.stop)
else:
return self.get_value(key)
def __setitem__(self, key, value):
""" x.__setitem__(i, y) <==> x[i]=y """
if isinstance(key, slice):
raise ValueError('setslice is not supported')
self.insert(key, value)
def __delitem__(self, key):
""" x.__delitem__(y) <==> del x[y] """
if isinstance(key, slice):
self.delitems(self.keyslice(key.start, key.stop))
else:
self.remove(key)
def delitems(self, keys):
""" T.delitems(keys) -> remove all items by keys
"""
# convert generator to a set, because the content of the
# tree will be modified!
for key in frozenset(keys):
self.remove(key)
def keyslice(self, startkey, endkey):
""" T.keyslice(startkey, endkey) -> key iterator:
startkey <= key < endkey.
"""
return ( item[0] for item in self.itemslice(startkey, endkey) )
def itemslice(self, startkey, endkey):
""" T.itemslice(s, e) -> item iterator: s <= key < e.
if s is None: start with min element -> T[:e]
if e is None: end with max element -> T[s:]
T[:] -> all items
"""
if self.is_empty():
return
if startkey is None:
# no lower bound
can_go_left = lambda: node.has_left() and visit_left
else:
# don't visit subtrees without keys in search range
can_go_left = lambda: node.key > startkey and node.has_left() and visit_left
if endkey is None:
# no upper bound
can_go_right = lambda: node.has_right()
else:
# don't visit subtrees without keys in search range
can_go_right = lambda: node.key < endkey and node.has_right()
if (startkey, endkey) == (None, None):
key_in_range = lambda: True
elif startkey is None:
key_in_range = lambda: node.key < endkey
elif endkey is None:
key_in_range = lambda: node.key >= startkey
else:
key_in_range = lambda: startkey <= node.key < endkey
node = self.get_walker()
visit_left = True
while True:
if can_go_left():
node.push()
node.go_left()
else:
if key_in_range():
yield node.item
if can_go_right():
node.go_right()
visit_left = True
else:
if node.stack_is_empty():
return
node.pop()
# left side is already done
visit_left = False
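    # Illustrative behavior (a sketch, not part of the original file): for a
    # tree with keys 1..5, itemslice(2, 5) yields only items whose key k
    # satisfies 2 <= k < 5, skipping subtrees that cannot contain such keys:
    #   list(t.itemslice(2, 5)) -> [(2, ...), (3, ...), (4, ...)]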
def valueslice(self, startkey, endkey):
""" T.valueslice(startkey, endkey) -> value iterator:
startkey <= key < endkey.
"""
return ( item[1] for item in self.itemslice(startkey, endkey) )
def get_value(self, key):
node = self.root
while node is not None:
if key == node.key:
return node.value
elif key < node.key:
node = node.left
else:
node = node.right
raise KeyError(str(key))
def __getstate__(self):
return dict(self.items())
def __setstate__(self, state):
# note for myself: this is called like __init__, so don't use clear()
# to remove existing data!
self._root = None
self._count = 0
self.update(state)
def setdefault(self, key, default=None):
""" T.setdefault(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T """
try:
return self.get_value(key)
except KeyError:
self.insert(key, default)
return default
def update(self, *args):
""" T.update(E) -> None. Update T from E : for (k, v) in E: T[k] = v """
for items in args:
try:
generator = items.items()
except AttributeError:
generator = iter(items)
for key, value in generator:
self.insert(key, value)
@classmethod
def fromkeys(cls, iterable, value=None):
""" T.fromkeys(S[,v]) -> New tree with keys from S and values equal to v.
v defaults to None.
"""
tree = cls()
for key in iterable:
tree.insert(key, value)
return tree
def get(self, key, default=None):
""" T.get(k[,d]) -> T[k] if k in T, else d. d defaults to None. """
try:
return self.get_value(key)
except KeyError:
return default
def pop(self, key, *args):
""" T.pop(k[,d]) -> v, remove specified key and return the corresponding value
If key is not found, d is returned if given, otherwise KeyError is raised
"""
if len(args) > 1:
raise TypeError("pop expected at most 2 arguments, got %d" % (1 + len(args)))
try:
value = self.get_value(key)
self.remove(key)
return value
except KeyError:
if len(args) == 0:
raise
else:
return args[0]
def popitem(self):
""" T.popitem() -> (k, v), remove and return some (key, value) pair as a
2-tuple; but raise KeyError if T is empty
"""
if self.is_empty():
raise KeyError("popitem(): tree is empty")
walker = self.get_walker()
walker.goto_leaf()
result = walker.item
self.remove(walker.key)
return result
def foreach(self, func, order=0):
""" Visit all tree nodes and process key, value.
parm func: function(key, value)
param int order: inorder = 0, preorder = -1, postorder = +1
"""
def _traverse():
if order == -1:
func(node.key, node.value)
if node.has_left():
node.push()
node.go_left()
_traverse()
node.pop()
if order == 0:
func(node.key, node.value)
if node.has_right():
node.push()
node.go_right()
_traverse()
node.pop()
if order == +1:
func(node.key, node.value)
node = self.get_walker()
_traverse()
def min_item(self):
""" Get item with min key of tree, raises ValueError if tree is empty. """
if self.count == 0:
raise ValueError("Tree is empty")
node = self._root
while node.left is not None:
node = node.left
return (node.key, node.value)
def pop_min(self):
""" T.pop_min() -> (k, v), remove item with minimum key, raise ValueError
if T is empty.
"""
item = self.min_item()
self.remove(item[0])
return item
def min_key(self):
""" Get min key of tree, raises ValueError if tree is empty. """
key, value = self.min_item()
return key
def prev_item(self, key):
""" Get predecessor (k,v) pair of key, raises KeyError if key is min key
or key does not exist.
"""
if self.count == 0:
raise KeyError("Tree is empty")
walker = self.get_walker()
return walker.prev_item(key)
def succ_item(self, key):
""" Get successor (k,v) pair of key, raises KeyError if key is max key
or key does not exist.
"""
if self.count == 0:
raise KeyError("Tree is empty")
walker = self.get_walker()
return walker.succ_item(key)
def prev_key(self, key):
""" Get predecessor to key, raises KeyError if key is min key
or key does not exist.
"""
key, value = self.prev_item(key)
return key
def succ_key(self, key):
""" Get successor to key, raises KeyError if key is max key
or key does not exist.
"""
key, value = self.succ_item(key)
return key
def floor_item(self, key):
""" Get the element (k,v) pair associated with the greatest key less
than or equal to the given key, raises KeyError if there is no such key.
"""
if self.count == 0:
raise KeyError("Tree is empty")
walker = self.get_walker()
return walker.floor_item(key)
def floor_key(self, key):
""" Get the greatest key less than or equal to the given key, raises
KeyError if there is no such key.
"""
key, value = self.floor_item(key)
return key
def ceiling_item(self, key):
""" Get the element (k,v) pair associated with the smallest key greater
than or equal to the given key, raises KeyError if there is no such key.
"""
if self.count == 0:
raise KeyError("Tree is empty")
walker = self.get_walker()
return walker.ceiling_item(key)
def ceiling_key(self, key):
""" Get the smallest key greater than or equal to the given key, raises
KeyError if there is no such key.
"""
key, value = self.ceiling_item(key)
return key
def max_item(self):
""" Get item with max key of tree, raises ValueError if tree is empty. """
if self.count == 0:
raise ValueError("Tree is empty")
node = self._root
while node.right is not None:
node = node.right
return (node.key, node.value)
def pop_max(self):
""" T.pop_max() -> (k, v), remove item with maximum key, raise ValueError
if T is empty.
"""
item = self.max_item()
self.remove(item[0])
return item
def max_key(self):
""" Get max key of tree, raises ValueError if tree is empty. """
key, value = self.max_item()
return key
def nsmallest(self, n, pop=False):
""" T.nsmallest(n) -> get list of n smallest items (k, v).
If pop is True, remove items from T.
"""
if pop:
return [self.pop_min() for _ in range(min(len(self), n))]
else:
items = self.items()
return [ next(items) for _ in range(min(len(self), n)) ]
def nlargest(self, n, pop=False):
""" T.nlargest(n) -> get list of n largest items (k, v).
If pop is True remove items from T.
"""
if pop:
return [self.pop_max() for _ in range(min(len(self), n))]
else:
items = self.items(reverse=True)
return [ next(items) for _ in range(min(len(self), n)) ]
def intersection(self, *trees):
""" x.intersection(t1, t2, ...) -> Tree, with keys *common* to all trees
"""
thiskeys = frozenset(self.keys())
sets = _build_sets(trees)
rkeys = thiskeys.intersection(*sets)
return self.__class__( ((key, self.get(key)) for key in rkeys) )
def union(self, *trees):
""" x.union(t1, t2, ...) -> Tree with keys from *either* trees
"""
thiskeys = frozenset(self.keys())
rkeys = thiskeys.union(*_build_sets(trees))
return self.__class__( ((key, self.get(key)) for key in rkeys) )
def difference(self, *trees):
""" x.difference(t1, t2, ...) -> Tree with keys in T but not any of t1,
t2, ...
"""
thiskeys = frozenset(self.keys())
rkeys = thiskeys.difference(*_build_sets(trees))
return self.__class__( ((key, self.get(key)) for key in rkeys) )
def symmetric_difference(self, tree):
""" x.symmetric_difference(t1) -> Tree with keys in either T and t1 but
not both
"""
thiskeys = frozenset(self.keys())
rkeys = thiskeys.symmetric_difference(frozenset(tree.keys()))
return self.__class__( ((key, self.get(key)) for key in rkeys) )
def issubset(self, tree):
""" x.issubset(tree) -> True if every element in x is in tree """
thiskeys = frozenset(self.keys())
return thiskeys.issubset(frozenset(tree.keys()))
    def issuperset(self, tree):
        """ x.issuperset(tree) -> True if every element in tree is in x """
        thiskeys = frozenset(self.keys())
        return thiskeys.issuperset(frozenset(tree.keys()))
def isdisjoint(self, tree):
""" x.isdisjoint(S) -> True if x has a null intersection with tree """
thiskeys = frozenset(self.keys())
return thiskeys.isdisjoint(frozenset(tree.keys()))
def _build_sets(trees):
return [ frozenset(tree.keys()) for tree in trees ]
|
poojavade/Genomics_Docker | refs/heads/master | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/boto-2.19.0-py2.7.egg/boto/beanstalk/layer1.py | 1 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
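    # Illustrative usage (a sketch; assumes AWS credentials are available via
    # the environment or boto config, which is not shown in this file):
    #   from boto.beanstalk.layer1 import Layer1
    #   conn = Layer1()
    #   conn.check_dns_availability('my-app-prefix')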
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('CreateEnvironment', params)
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
        values the options take, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
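    # Pagination sketch for describe_events (illustrative values; the exact
    # response layout depends on the service, so treat this as a sketch):
    #
    #     first_page = beanstalk.describe_events(application_name='my-app',
    #                                            max_records=100)
    #     # feed any NextToken from the previous result back in:
    #     beanstalk.describe_events(application_name='my-app',
    #                               next_token='token-from-previous-call')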
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
            environment. false: The environment is removed from AWS
            Elastic Beanstalk, but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
UpdateApplication returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
application version is found with this label, UpdateApplication
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
# For params such as the ConfigurationOptionSettings,
# they can specify a list of tuples where each tuple maps to a specific
# arg. For example:
        # user_values = [('foo', 'bar', 'baz')]
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
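    # A minimal sketch of the flattening performed above (hypothetical
    # instance name `layer1`):
    #
    #     params = {}
    #     layer1._build_list_params(params,
    #                               [('aws:autoscaling:launchconfiguration',
    #                                 'Ec2KeyName', 'mykeypair')],
    #                               'OptionSettings.member',
    #                               ('Namespace', 'OptionName', 'Value'))
    #     # params now holds:
    #     #   OptionSettings.member.1.Namespace = aws:autoscaling:launchconfiguration
    #     #   OptionSettings.member.1.OptionName = Ec2KeyName
    #     #   OptionSettings.member.1.Value = mykeypair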
|
PaulKinlan/physical-web-1
|
refs/heads/master
|
metadata-server/main.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import json
import logging
from datetime import datetime, timedelta
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from urlparse import urljoin
import os
import re
class BaseModel(ndb.Model):
added_on = ndb.DateTimeProperty(auto_now_add = True)
updated_on = ndb.DateTimeProperty(auto_now = True)
class Device(BaseModel):
name = ndb.StringProperty()
url = ndb.StringProperty()
class SiteInformation(BaseModel):
url = ndb.StringProperty()
favicon_url = ndb.StringProperty()
title = ndb.StringProperty()
description = ndb.StringProperty()
content = ndb.TextProperty()
class ResolveScan(webapp2.RequestHandler):
def post(self):
input_data = self.request.body
input_object = json.loads(input_data) # Data is not sanitised.
metadata_output = []
output = {
"metadata": metadata_output
}
devices = []
objects = input_object["objects"]
# Resolve the devices
for obj in objects:
key_id = None
url = None
force = False
if "id" in obj:
key_id = obj["id"]
elif "url" in obj:
key_id = obj["url"]
url = obj["url"]
if "force" in obj:
force = True
            # We need to go and fetch. We probably want to fetch asynchronously.
rssi = obj["rssi"]
# In this model we can only deal with one device with a given ID.
device = Device.get_or_insert(key_id, name = key_id, url = url)
device_data = {
"id": device.name
}
if force or device.url is not None:
# Really if we don't have the data we should not return it.
siteInfo = SiteInformation.get_by_id(device.url)
if force or siteInfo is None or siteInfo.updated_on < datetime.now() - timedelta(minutes=5):
                # If we don't have the data or it is older than 5 minutes, fetch.
siteInfo = FetchAndStoreUrl(siteInfo, device.url)
logging.info(siteInfo)
if siteInfo is not None:
device_data["url"] = siteInfo.url
device_data["title"] = siteInfo.title
device_data["description"] = siteInfo.description
device_data["icon"] = siteInfo.favicon_url
device_data["favicon_url"] = siteInfo.favicon_url
else:
device_data["url"] = device.url
metadata_output.append(device_data)
# Resolve from DB based off key.
logging.info(output)
self.response.out.write(json.dumps(output))
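    # Example request body accepted above (illustrative values only; "rssi"
    # is read unconditionally, "id"/"url"/"force" are optional):
    #
    #     {"objects": [{"id": "device-1",
    #                   "url": "http://example.com",
    #                   "rssi": -41,
    #                   "force": true}]}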
class SaveUrl(webapp2.RequestHandler):
def post(self):
name = self.request.get("name")
url = self.request.get("url")
title = ""
icon = "/favicon.ico"
device = Device.get_or_insert(name, name = name, url = url)
device.url = url
device.put()
# Index the page
        FetchAndStoreUrl(None, device.url)
self.redirect("/index.html")
def FetchAndStoreUrl(siteInfo, url):
# Index the page
result = urlfetch.fetch(url)
if result.status_code == 200:
title = ""
description = ""
final_url = result.final_url or url
icon = urljoin(final_url, "/favicon.ico")
# parse the content
title_search = re.search('<title([^>]*)>(.+)</title>', result.content, flags=re.I)
description_search = re.search('<meta name="description" content="([^\"]+)', result.content, flags=re.I) or \
                             re.search('<meta content="([^\"]+)" name="description"', result.content, flags=re.I)
if title_search:
title = title_search.group(2)
if description_search:
description = description_search.group(1)
icon_search = re.search('<link rel="shortcut icon"([^>]+)href="(?P<URL>[^\"]+)', result.content, flags=re.I) or \
re.search("<link rel='shortcut icon'([^>]+)href='(?P<URL>[^\']+)", result.content, flags=re.I) or \
re.search('<link href="(?P<URL>[^\"]+)([^>]+)rel="shortcut icon"', result.content, flags=re.I) or \
re.search("<link href='(?P<URL>[^\']+)([^>]+)rel='shortcut icon'", result.content, flags=re.I) or \
re.search('<link rel="icon"([^>]+)href="(?P<URL>[^\"]+)"', result.content, flags=re.I) or \
re.search("<link rel='icon'([^>]+)href='(?P<URL>[^\']+)'", result.content, flags=re.I) or \
re.search('<link rel="apple-touch-icon"([^>]+)href=(?P<URL>[^\"]+)', result.content, flags=re.I) or \
re.search("<link rel='apple-touch-icon'([^>]+)href=(?P<URL>[^\']+)", result.content, flags=re.I)
if icon_search:
icon = icon_search.group('URL')
if icon[0:4] != "http":
icon = urljoin(final_url, icon)
logging.info("final url %s" % final_url)
if siteInfo is None:
siteInfo = SiteInformation.get_or_insert(url,
url = final_url,
title = title,
favicon_url = icon,
description = description,
content = result.content)
else:
# update the data because it already exists
siteInfo.url = final_url
siteInfo.title = title
siteInfo.favicon_url = icon
siteInfo.description = description
siteInfo.content = result.content
siteInfo.put()
return siteInfo
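# Hypothetical usage sketch (requires the App Engine urlfetch service; the
# function returns None when the fetch does not come back with a 200):
#
#     info = FetchAndStoreUrl(None, 'http://example.com/')
#     if info is not None:
#         logging.info('%s -> %s (%s)', info.url, info.title, info.favicon_url)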
class Index(webapp2.RequestHandler):
def get(self):
self.response.out.write("")
app = webapp2.WSGIApplication([
('/', Index),
('/resolve-scan', ResolveScan),
('/add-device', SaveUrl)
], debug=True)
|
martynovp/edx-platform
|
refs/heads/master
|
cms/wsgi.py
|
167
|
# Patch the xml libs before anything else.
from safe_lxml import defuse_xml_libs
defuse_xml_libs()
# Disable PyContract contract checking when running as a webserver
import contracts
contracts.disable_all()
import openedx.core.operations
openedx.core.operations.install_memory_dumper()
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cms.envs.aws")
import cms.startup as startup
startup.run()
# This application object is used by the development server
# as well as any WSGI server configured to use this file.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
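# The module can then be pointed at from any WSGI server, for example
# (hypothetical invocation):
#
#     gunicorn cms.wsgi:application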
|
powerlim2/project_free_insight
|
refs/heads/master
|
data_api/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
|
499
|
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree",
"pulldom"]
import sys
from .. import constants
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
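# Usage sketch (assumes a tree built by the matching html5lib treebuilder;
# the walker instance is iterable, so it can be fed straight to pprint):
#
#     walker_class = getTreeWalker("etree")
#     print(pprint(walker_class(document)))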
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
|
Applied-GeoSolutions/geokit
|
refs/heads/master
|
layers/utils.py
|
1
|
import os
from django.conf import settings
from django.http import HttpResponse
from django.middleware.gzip import GZipMiddleware
from django.template.loader import render_to_string
from layers.models import Layer
def mapnik_xml(func):
def wrapped_function(request, layer_name, *args, **kwargs):
layer_cache_path = os.path.realpath(settings.STATIC_ROOT + '/layers')
layer = Layer.objects.get(name=layer_name)
name = layer.query_hash()
layer_path = layer_cache_path + '/{}.xml'.format(name)
if not os.path.isfile(layer_path):
mapnik_config = render_to_string(
'layers/mapnik/mapnik_config.xml',
{'layer_name': layer_name, 'query': layer.mapnik_query()}
)
with open(layer_path, 'w') as f:
f.write(mapnik_config)
return func(request, layer_name, *args, **kwargs)
return wrapped_function
def tile_cache(func):
def wrapped_function(request, layer_name, z, x, y):
x, y, z = int(x), int(y), int(z)
tile_cache_path = os.path.realpath(settings.STATIC_ROOT + '/tiles')
layer = Layer.objects.get(name=layer_name)
name = layer.query_hash()
tile_path = tile_cache_path + '/{}/{}/{}/{}.pbf'.format(name, z, x, y)
if not os.path.isfile(tile_path):
response = func(request, name, z, x, y)
else:
print "Sending cached tile..."
response = HttpResponse(content_type='application/x-protobuf')
with open(tile_path, 'rb') as f:
response.write(f.read())
gzip_middleware = GZipMiddleware()
response = gzip_middleware.process_response(request, response)
return response
return wrapped_function
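# Usage sketch (hypothetical view function; both decorators expect the
# (request, layer_name, z, x, y) signature used above):
#
#     @mapnik_xml
#     @tile_cache
#     def serve_tile(request, layer_name, z, x, y):
#         ...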
|
valtech-mooc/edx-platform
|
refs/heads/master
|
lms/djangoapps/mobile_api/users/tests.py
|
8
|
"""
Tests for users API
"""
import datetime
from django.utils import timezone
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from student.models import CourseEnrollment
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory
from .. import errors
from ..testutils import MobileAPITestCase, MobileAuthTestMixin, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin
from .serializers import CourseEnrollmentSerializer
class TestUserDetailApi(MobileAPITestCase, MobileAuthUserTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>...
"""
REVERSE_INFO = {'name': 'user-detail', 'params': ['username']}
def test_success(self):
self.login()
response = self.api_response()
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['email'], self.user.email)
class TestUserInfoApi(MobileAPITestCase, MobileAuthTestMixin):
"""
Tests for /api/mobile/v0.5/my_user_info
"""
def reverse_url(self, reverse_args=None, **kwargs):
return '/api/mobile/v0.5/my_user_info'
def test_success(self):
"""Verify the endpoint redirects to the user detail endpoint"""
self.login()
response = self.api_response(expected_response_code=302)
self.assertTrue(self.username in response['location'])
class TestUserEnrollmentApi(MobileAPITestCase, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin):
"""
Tests for /api/mobile/v0.5/users/<user_name>/course_enrollments/
"""
REVERSE_INFO = {'name': 'courseenrollment-detail', 'params': ['username']}
ALLOW_ACCESS_TO_UNRELEASED_COURSE = True
def verify_success(self, response):
super(TestUserEnrollmentApi, self).verify_success(response)
courses = response.data
self.assertEqual(len(courses), 1)
found_course = courses[0]['course']
self.assertTrue('video_outline' in found_course)
self.assertTrue('course_handouts' in found_course)
self.assertEqual(found_course['id'], unicode(self.course.id))
self.assertEqual(courses[0]['mode'], 'honor')
def verify_failure(self, response):
self.assertEqual(response.status_code, 200)
courses = response.data
self.assertEqual(len(courses), 0)
def test_sort_order(self):
self.login()
num_courses = 3
courses = []
for course_num in range(num_courses):
courses.append(CourseFactory.create(mobile_available=True))
self.enroll(courses[course_num].id)
# verify courses are returned in the order of enrollment, with most recently enrolled first.
response = self.api_response()
for course_num in range(num_courses):
self.assertEqual(
response.data[course_num]['course']['id'], # pylint: disable=no-member
unicode(courses[num_courses - course_num - 1].id)
)
def test_no_certificate(self):
self.login_and_enroll()
response = self.api_response()
certificate_data = response.data[0]['certificate'] # pylint: disable=no-member
self.assertDictEqual(certificate_data, {})
def test_certificate(self):
self.login_and_enroll()
certificate_url = "http://test_certificate_url"
GeneratedCertificateFactory.create(
user=self.user,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url=certificate_url,
)
response = self.api_response()
certificate_data = response.data[0]['certificate'] # pylint: disable=no-member
self.assertEquals(certificate_data['url'], certificate_url)
def test_no_facebook_url(self):
self.login_and_enroll()
response = self.api_response()
course_data = response.data[0]['course'] # pylint: disable=no-member
self.assertIsNone(course_data['social_urls']['facebook'])
def test_facebook_url(self):
self.login_and_enroll()
self.course.facebook_url = "http://facebook.com/test_group_page"
self.store.update_item(self.course, self.user.id)
response = self.api_response()
course_data = response.data[0]['course'] # pylint: disable=no-member
self.assertEquals(course_data['social_urls']['facebook'], self.course.facebook_url)
class CourseStatusAPITestCase(MobileAPITestCase):
"""
Base test class for /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
REVERSE_INFO = {'name': 'user-course-status', 'params': ['username', 'course_id']}
def setUp(self):
"""
Creates a basic course structure for our course
"""
super(CourseStatusAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
)
self.sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category='vertical',
)
self.other_sub_section = ItemFactory.create(
parent=self.section,
category='sequential',
)
self.other_unit = ItemFactory.create(
parent=self.other_sub_section,
category='vertical',
)
class TestCourseStatusGET(CourseStatusAPITestCase, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin):
"""
Tests for GET of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def test_success(self):
self.login_and_enroll()
response = self.api_response()
self.assertEqual(
response.data["last_visited_module_id"], # pylint: disable=no-member
unicode(self.sub_section.location)
)
self.assertEqual(
response.data["last_visited_module_path"], # pylint: disable=no-member
[unicode(module.location) for module in [self.sub_section, self.section, self.course]]
)
class TestCourseStatusPATCH(CourseStatusAPITestCase, MobileAuthUserTestMixin, MobileEnrolledCourseAccessTestMixin):
"""
Tests for PATCH of /api/mobile/v0.5/users/<user_name>/course_status_info/{course_id}
"""
def url_method(self, url, **kwargs):
# override implementation to use PATCH method.
return self.client.patch(url, data=kwargs.get('data', None)) # pylint: disable=no-member
def test_success(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": unicode(self.other_unit.location)})
self.assertEqual(
response.data["last_visited_module_id"], # pylint: disable=no-member
unicode(self.other_sub_section.location)
)
def test_invalid_module(self):
self.login_and_enroll()
response = self.api_response(data={"last_visited_module_id": "abc"}, expected_response_code=400)
self.assertEqual(
response.data, # pylint: disable=no-member
errors.ERROR_INVALID_MODULE_ID
)
def test_nonexistent_module(self):
self.login_and_enroll()
non_existent_key = self.course.id.make_usage_key('video', 'non-existent')
response = self.api_response(data={"last_visited_module_id": non_existent_key}, expected_response_code=400)
self.assertEqual(
response.data, # pylint: disable=no-member
errors.ERROR_INVALID_MODULE_ID
)
def test_no_timezone(self):
self.login_and_enroll()
past_date = datetime.datetime.now()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": past_date.isoformat() # pylint: disable=maybe-no-member
},
expected_response_code=400
)
self.assertEqual(
response.data, # pylint: disable=no-member
errors.ERROR_INVALID_MODIFICATION_DATE
)
def _date_sync(self, date, initial_unit, update_unit, expected_subsection):
"""
Helper for test cases that use a modification to decide whether
to update the course status
"""
self.login_and_enroll()
# save something so we have an initial date
self.api_response(data={"last_visited_module_id": unicode(initial_unit.location)})
# now actually update it
response = self.api_response(
data={
"last_visited_module_id": unicode(update_unit.location),
"modification_date": date.isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"], # pylint: disable=no-member
unicode(expected_subsection.location)
)
def test_old_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=-100)
self._date_sync(date, self.unit, self.other_unit, self.sub_section)
def test_new_date(self):
self.login_and_enroll()
date = timezone.now() + datetime.timedelta(days=100)
self._date_sync(date, self.unit, self.other_unit, self.other_sub_section)
def test_no_initial_date(self):
self.login_and_enroll()
response = self.api_response(
data={
"last_visited_module_id": unicode(self.other_unit.location),
"modification_date": timezone.now().isoformat()
}
)
self.assertEqual(
response.data["last_visited_module_id"], # pylint: disable=no-member
unicode(self.other_sub_section.location)
)
def test_invalid_date(self):
self.login_and_enroll()
response = self.api_response(data={"modification_date": "abc"}, expected_response_code=400)
self.assertEqual(
response.data, # pylint: disable=no-member
errors.ERROR_INVALID_MODIFICATION_DATE
)
class TestCourseEnrollmentSerializer(MobileAPITestCase):
"""
Test the course enrollment serializer
"""
def test_success(self):
self.login_and_enroll()
serialized = CourseEnrollmentSerializer(CourseEnrollment.enrollments_for_user(self.user)[0]).data # pylint: disable=no-member
self.assertEqual(serialized['course']['video_outline'], None)
self.assertEqual(serialized['course']['name'], self.course.display_name)
self.assertEqual(serialized['course']['number'], self.course.id.course)
self.assertEqual(serialized['course']['org'], self.course.id.org)
def test_with_display_overrides(self):
self.login_and_enroll()
self.course.display_coursenumber = "overridden_number"
self.course.display_organization = "overridden_org"
self.store.update_item(self.course, self.user.id)
serialized = CourseEnrollmentSerializer(CourseEnrollment.enrollments_for_user(self.user)[0]).data # pylint: disable=no-member
self.assertEqual(serialized['course']['number'], self.course.display_coursenumber)
self.assertEqual(serialized['course']['org'], self.course.display_organization)
|
sgraham/nope
|
refs/heads/master
|
chrome/test/data/safe_browsing/two_phase_testserver.py
|
60
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Testserver for the two phase upload protocol."""
import base64
import BaseHTTPServer
import hashlib
import os
import sys
import urlparse
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', '..', 'net',
'tools', 'testserver'))
import testserver_base
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def ReadRequestBody(self):
"""This function reads the body of the current HTTP request, handling
both plain and chunked transfer encoded requests."""
if self.headers.getheader('transfer-encoding') == 'chunked':
return ''
length = int(self.headers.getheader('content-length'))
return self.rfile.read(length)
def do_GET(self):
print 'GET', self.path
self.send_error(400, 'GET not supported')
def do_POST(self):
request_body = self.ReadRequestBody()
print 'POST', repr(self.path), repr(request_body)
kStartHeader = 'x-goog-resumable'
if kStartHeader not in self.headers:
self.send_error(400, 'Missing header: ' + kStartHeader)
return
if self.headers.get(kStartHeader) != 'start':
self.send_error(400, 'Invalid %s header value: %s' % (
kStartHeader, self.headers.get(kStartHeader)))
return
metadata_hash = hashlib.sha1(request_body).hexdigest()
_, _, url_path, _, query, _ = urlparse.urlparse(self.path)
query_args = urlparse.parse_qs(query)
if query_args.get('p1close'):
self.close_connection = 1
return
put_url = 'http://%s:%d/put?%s,%s,%s' % (self.server.server_address[0],
self.server.server_port,
url_path,
metadata_hash,
base64.urlsafe_b64encode(query))
self.send_response(int(query_args.get('p1code', [201])[0]))
self.send_header('Location', put_url)
self.end_headers()
def do_PUT(self):
_, _, url_path, _, query, _ = urlparse.urlparse(self.path)
if url_path != '/put':
self.send_error(400, 'invalid path on 2nd phase: ' + url_path)
return
initial_path, metadata_hash, config_query_b64 = query.split(',', 2)
config_query = urlparse.parse_qs(base64.urlsafe_b64decode(config_query_b64))
request_body = self.ReadRequestBody()
print 'PUT', repr(self.path), len(request_body), 'bytes'
if config_query.get('p2close'):
self.close_connection = 1
return
self.send_response(int(config_query.get('p2code', [200])[0]))
self.end_headers()
self.wfile.write('%s\n%s\n%s\n' % (
initial_path,
metadata_hash,
hashlib.sha1(request_body).hexdigest()))
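# Client-side sketch of the two phases handled above (stdlib only;
# the URL and payloads are illustrative):
#
#     import urllib2
#     req = urllib2.Request('http://localhost:8000/start', data='metadata')
#     req.add_header('x-goog-resumable', 'start')
#     put_url = urllib2.urlopen(req).info().getheader('Location')
#     put = urllib2.Request(put_url, data='payload')
#     put.get_method = lambda: 'PUT'
#     urllib2.urlopen(put)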
class ServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for safebrowsing_test_server.py."""
def create_server(self, server_data):
server = BaseHTTPServer.HTTPServer((self.options.host, self.options.port),
RequestHandler)
print 'server started on port %d...' % server.server_port
server_data['port'] = server.server_port
return server
if __name__ == '__main__':
sys.exit(ServerRunner().main())
|
hainm/elyxer
|
refs/heads/master
|
src/elyxer/maths/misc.py
|
2
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# eLyXer -- convert LyX source files to HTML output.
#
# Copyright (C) 2009 Alex Fernández
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --end--
# Alex 20110109
# eLyXer misc commands, invoked by class name from the configuration file.
from elyxer.util.numbering import *
from elyxer.maths.command import *
from elyxer.maths.extracommand import *
from elyxer.maths.macro import *
class SetCounterFunction(CommandBit):
"A function which is used in the preamble to set a counter."
def parsebit(self, pos):
"Parse a function with [] and {} parameters."
counter = self.parseliteral(pos)
value = self.parseliteral(pos)
try:
self.setcounter(counter, int(value))
except:
Trace.error('Counter ' + counter + ' cannot be set to ' + value)
def setcounter(self, counter, value):
"Set a global counter."
Trace.debug('Setting counter ' + unicode(counter) + ' to ' + unicode(value))
NumberGenerator.generator.getcounter(counter).init(value)
class FormulaTag(CommandBit):
"A \\tag command."
def parsebit(self, pos):
"Parse the tag and apply it."
self.output = EmptyOutput()
self.tag = self.parseliteral(pos)
class MiscCommand(CommandBit):
"A generic command which maps to a command class."
commandmap = FormulaConfig.misccommands
def parsebit(self, pos):
"Find the right command to parse and parse it."
commandtype = globals()[self.translated]
return self.parsecommandtype(self.translated, commandtype, pos)
FormulaCommand.types += [MiscCommand]
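# A hedged sketch of the dispatch convention noted in the header comment:
# the configuration maps each TeX command to one of the class names above,
# and MiscCommand.parsebit resolves that name at parse time via globals().
def _example_name_dispatch(name='FormulaTag'):
  "Resolve a configured command name to its class, as MiscCommand does."
  return globals()[name]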
|
louisLouL/pair_trading
|
refs/heads/master
|
capstone_env/lib/python3.6/site-packages/setuptools/depends.py
|
336
|
import sys
import imp
import marshal
from distutils.version import StrictVersion
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from .py33compat import Bytecode
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
if kind == PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts, module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
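# A minimal sketch of the bytecode scan above, using only names defined in
# this module: compile a module-level assignment and pull the constant out
# without importing anything.
def _example_extract_constant():
    code = compile("__version__ = '1.2.3'", '<sample>', 'exec')
    print(extract_constant(code, '__version__'))  # '1.2.3'
    print(extract_constant(code, 'other_name'))   # None (never assigned)
    print(Require('Sample', '1.0', 'sample').full_name())  # 'Sample-1.0'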
|
Gisleude/speakerfight
|
refs/heads/master
|
deck/urls.py
|
11
|
from django.conf.urls import patterns
from smarturls import surl as url
import views
urlpatterns = patterns(
'',
url(regex=r'/my_proposals/',
view=views.ListMyProposals.as_view(),
name='my_proposals'),
url(regex=r'/events/',
view=views.ListEvents.as_view(),
name='list_events'),
url(regex=r'/events/create/',
view=views.CreateEvent.as_view(),
name='create_event'),
url(regex=r'/events/<slug:slug>/',
view=views.DetailEvent.as_view(),
name='view_event'),
url(regex=r'/events/<slug:slug>/update/',
view=views.UpdateEvent.as_view(),
name='update_event'),
url(regex=r'/events/<slug:slug>/export/',
view=views.ExportEvent.as_view(),
name='export_event'),
url(regex=r'/events/<slug:slug>/create_grade/',
view=views.CreateEventGrade.as_view(),
name='create_event_grade'),
url(regex=r'/events/<slug:slug>/proposals/create/',
view=views.CreateProposal.as_view(),
name='create_event_proposal'),
url(regex=r'/events/<slug:slug>/grade/',
view=views.DetailEventGrade.as_view(),
name='view_event_grade'),
url(regex=r'/events/<slug:event_slug>/'
r'proposals/<slug:slug>/update/',
view=views.UpdateProposal.as_view(),
name='update_proposal'),
url(regex=r'/events/<slug:event_slug>/'
r'proposals/<slug:slug>/delete/',
view=views.DeleteProposal.as_view(),
name='delete_proposal'),
url(regex=r'/events/<slug:event_slug>/'
r'proposals/<slug:slug>/rate/<slug:rate>/',
view=views.RateProposal.as_view(),
name='rate_proposal'),
url(regex=r'/events/<slug:event_slug>/'
r'proposals/<slug:slug>/approve_proposal/',
view=views.ApproveProposal.as_view(),
name='approve_proposal'),
url(regex=r'/events/<slug:event_slug>/'
r'proposals/<slug:slug>/disapprove_proposal/',
view=views.DisapproveProposal.as_view(),
name='disapprove_proposal'),
)
|
thejens/luigi
|
refs/heads/master
|
test/create_packages_archive_root/package/subpackage/submodule.py
|
122
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os # NOQA
|
c0710204/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/tests/utils.py
|
47
|
"""
Utilities for instructor unit tests
"""
import datetime
import json
import random
from django.utils.timezone import utc
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id, 'to_option': sent_to}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = unicode(email_id) # pylint: disable=invalid-name
        # Select a random date for the created field
year = random.choice(range(1950, 2000))
month = random.choice(range(1, 12))
day = random.choice(range(1, 28))
hour = random.choice(range(0, 23))
minute = random.choice(range(0, 59))
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=utc)
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
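# A hedged usage sketch showing how a test might combine the fakes above
# (it relies on this module's imports, e.g. get_default_time_display, so it
# is only runnable inside the test environment):
def _example_fake_email_info():
    fake_email = FakeEmail(email_id=42)
    info = FakeEmailInfo(fake_email, num_sent=10, num_failed=2)
    print(info.to_dict()['number_sent'])  # '10 sent, 2 failed'
    print(info.to_dict()['email']['id'])  # u'42'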
|
petrus-v/odoo
|
refs/heads/8.0
|
addons/bus/bus.py
|
325
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import select
import threading
import time
import random
import simplejson
import openerp
from openerp.osv import osv, fields
from openerp.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
TIMEOUT = 50
#----------------------------------------------------------
# Bus
#----------------------------------------------------------
def json_dump(v):
return simplejson.dumps(v, separators=(',', ':'))
def hashable(key):
if isinstance(key, list):
key = tuple(key)
return key
class ImBus(osv.Model):
_name = 'bus.bus'
_columns = {
'id' : fields.integer('Id'),
'create_date' : fields.datetime('Create date'),
'channel' : fields.char('Channel'),
'message' : fields.char('Message'),
}
def gc(self, cr, uid):
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT*2)
domain = [('create_date', '<', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
ids = self.search(cr, openerp.SUPERUSER_ID, domain)
self.unlink(cr, openerp.SUPERUSER_ID, ids)
def sendmany(self, cr, uid, notifications):
channels = set()
for channel, message in notifications:
channels.add(channel)
values = {
"channel" : json_dump(channel),
"message" : json_dump(message)
}
self.pool['bus.bus'].create(cr, openerp.SUPERUSER_ID, values)
cr.commit()
if random.random() < 0.01:
self.gc(cr, uid)
if channels:
with openerp.sql_db.db_connect('postgres').cursor() as cr2:
cr2.execute("notify imbus, %s", (json_dump(list(channels)),))
def sendone(self, cr, uid, channel, message):
self.sendmany(cr, uid, [[channel, message]])
def poll(self, cr, uid, channels, last=0):
        # the first poll returns the notifications still in the 'buffer'
if last == 0:
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT)
domain = [('create_date', '>', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
else:
# else returns the unread notifications
domain = [('id','>',last)]
channels = [json_dump(c) for c in channels]
domain.append(('channel','in',channels))
notifications = self.search_read(cr, openerp.SUPERUSER_ID, domain)
return [{"id":notif["id"], "channel": simplejson.loads(notif["channel"]), "message":simplejson.loads(notif["message"])} for notif in notifications]
class ImDispatch(object):
def __init__(self):
self.channels = {}
def poll(self, dbname, channels, last, timeout=TIMEOUT):
        # Don't hang ctrl-c for a poll request; we need to bypass private
        # attribute access because we don't know, before starting the thread,
        # that it will handle a longpolling request
if not openerp.evented:
current = threading.current_thread()
current._Thread__daemonic = True
# rename the thread to avoid tests waiting for a longpolling
current.setName("openerp.longpolling.request.%s" % current.ident)
registry = openerp.registry(dbname)
        # immediately return if past notifications exist
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
# or wait for future ones
if not notifications:
event = self.Event()
for c in channels:
self.channels.setdefault(hashable(c), []).append(event)
try:
event.wait(timeout=timeout)
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
except Exception:
# timeout
pass
return notifications
def loop(self):
""" Dispatch postgres notifications to the relevant polling threads/greenlets """
_logger.info("Bus.loop listen imbus on db postgres")
with openerp.sql_db.db_connect('postgres').cursor() as cr:
conn = cr._cnx
cr.execute("listen imbus")
            cr.commit()
while True:
if select.select([conn], [], [], TIMEOUT) == ([],[],[]):
pass
else:
conn.poll()
channels = []
while conn.notifies:
channels.extend(json.loads(conn.notifies.pop().payload))
# dispatch to local threads/greenlets
events = set()
for c in channels:
events.update(self.channels.pop(hashable(c),[]))
for e in events:
e.set()
def run(self):
while True:
try:
self.loop()
except Exception, e:
_logger.exception("Bus.loop error, sleep and retry")
time.sleep(TIMEOUT)
def start(self):
if openerp.evented:
# gevent mode
import gevent
self.Event = gevent.event.Event
gevent.spawn(self.run)
elif openerp.multi_process:
# disabled in prefork mode
return
else:
# threaded mode
self.Event = threading.Event
t = threading.Thread(name="%s.Bus" % __name__, target=self.run)
t.daemon = True
t.start()
return self
dispatch = ImDispatch().start()
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class Controller(openerp.http.Controller):
""" Examples:
openerp.jsonRpc('/longpolling/poll','call',{"channels":["c1"],last:0}).then(function(r){console.log(r)});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c1","message":"m1"});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c2","message":"m2"});
"""
@openerp.http.route('/longpolling/send', type="json", auth="public")
def send(self, channel, message):
if not isinstance(channel, basestring):
raise Exception("bus.Bus only string channels are allowed.")
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
return registry['bus.bus'].sendone(cr, uid, channel, message)
# override to add channels
def _poll(self, dbname, channels, last, options):
request.cr.close()
request._cr = None
return dispatch.poll(dbname, channels, last)
@openerp.http.route('/longpolling/poll', type="json", auth="public")
def poll(self, channels, last, options=None):
if options is None:
options = {}
if not dispatch:
raise Exception("bus.Bus unavailable")
if [c for c in channels if not isinstance(c, basestring)]:
print channels
raise Exception("bus.Bus only string channels are allowed.")
return self._poll(request.db, channels, last, options)
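# A hedged sketch of the HTTP contract implemented by the controller above,
# equivalent to the jsonRpc examples in its docstring; it assumes a local
# server listening on the default port 8069.
def _example_longpolling_client():
    import urllib2
    payload = {'jsonrpc': '2.0', 'method': 'call',
               'params': {'channels': ['c1'], 'last': 0}}
    request = urllib2.Request('http://localhost:8069/longpolling/poll',
                              json.dumps(payload),
                              {'Content-Type': 'application/json'})
    print(urllib2.urlopen(request).read())  # blocks up to TIMEOUT seconds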
# vim:et:
|
Y3K/django
|
refs/heads/master
|
django/contrib/contenttypes/__init__.py
|
809
|
default_app_config = 'django.contrib.contenttypes.apps.ContentTypesConfig'
|
Motaku/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/gce.py
|
305
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pprint
USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1"
def gce_connect(module, provider=None):
"""Return a Google Cloud Engine connection."""
service_account_email = module.params.get('service_account_email', None)
pem_file = module.params.get('pem_file', None)
project_id = module.params.get('project_id', None)
# If any of the values are not given as parameters, check the appropriate
# environment variables.
if not service_account_email:
service_account_email = os.environ.get('GCE_EMAIL', None)
if not project_id:
project_id = os.environ.get('GCE_PROJECT', None)
if not pem_file:
pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
# If we still don't have one or more of our credentials, attempt to
# get the remaining values from the libcloud secrets file.
if service_account_email is None or pem_file is None:
try:
import secrets
except ImportError:
secrets = None
if hasattr(secrets, 'GCE_PARAMS'):
if not service_account_email:
service_account_email = secrets.GCE_PARAMS[0]
if not pem_file:
pem_file = secrets.GCE_PARAMS[1]
keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not project_id:
project_id = keyword_params.get('project', None)
# If we *still* don't have the credentials we need, then it's time to
# just fail out.
if service_account_email is None or pem_file is None or project_id is None:
module.fail_json(msg='Missing GCE connection parameters in libcloud '
'secrets file.')
return None
# Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE)
if provider is None:
provider = Provider.GCE
try:
gce = get_driver(provider)(service_account_email, pem_file,
datacenter=module.params.get('zone', None),
project=project_id)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except (RuntimeError, ValueError), e:
module.fail_json(msg=str(e), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
return gce
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return 'Unexpected response: ' + pprint.pformat(vars(error))
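# gce_connect above is a snippet: it assumes the embedding module already
# imported os and libcloud's Provider/get_driver. A hedged sketch of the
# credential fallback order it implements (all values hypothetical):
# 1) module params, 2) GCE_* environment variables, 3) libcloud secrets.py.
def _example_env_credentials():
    import os
    os.environ.setdefault('GCE_EMAIL', 'svc-account@my-project.iam.gserviceaccount.com')
    os.environ.setdefault('GCE_PEM_FILE_PATH', '/path/to/key.pem')
    os.environ.setdefault('GCE_PROJECT', 'my-project')
    # gce = gce_connect(module)  # module: an AnsibleModule with a 'zone' param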
|
michaelBenin/django-shop
|
refs/heads/master
|
setup.py
|
13
|
from setuptools import setup, find_packages
import os
import shop
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
setup(
author="Christopher Glass",
author_email="tribaal@gmail.com",
name='django-shop',
version=shop.__version__,
description='An Advanced Django Shop',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='http://www.django-shop.org/',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.4',
'django-classy-tags>=0.3.3',
'django-polymorphic>=0.2',
'south>=0.7.2',
'jsonfield>=0.9.6'
],
packages=find_packages(exclude=["example", "example.*"]),
include_package_data=True,
zip_safe=False,
)
|
pauloschilling/sentry
|
refs/heads/master
|
tests/sentry/web/api/tests.py
|
13
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import OrganizationMember, ProjectKey, User
from sentry.testutils import TestCase
from sentry.utils import json
class StoreViewTest(TestCase):
@fixture
def path(self):
return reverse('sentry-api-store', kwargs={'project_id': self.project.id})
@mock.patch('sentry.web.api.StoreView._parse_header')
def test_options_response(self, parse_header):
project = self.create_project()
pk = ProjectKey.objects.get_or_create(project=project)[0]
parse_header.return_value = {
'sentry_project': project.id,
'sentry_key': pk.public_key,
'sentry_version': '2.0',
}
resp = self.client.options(self.path)
assert resp.status_code == 200, resp.content
self.assertIn('Allow', resp)
self.assertEquals(resp['Allow'], 'GET, POST, HEAD, OPTIONS')
self.assertIn('Content-Length', resp)
self.assertEquals(resp['Content-Length'], '0')
@mock.patch('sentry.web.api.is_valid_origin', mock.Mock(return_value=False))
def test_options_response_with_invalid_origin(self):
resp = self.client.options(self.path, HTTP_ORIGIN='http://foo.com')
assert resp.status_code == 403, resp.content
self.assertIn('Access-Control-Allow-Origin', resp)
self.assertEquals(resp['Access-Control-Allow-Origin'], '*')
self.assertIn('X-Sentry-Error', resp)
assert resp['X-Sentry-Error'] == "Invalid origin: http://foo.com"
assert json.loads(resp.content)['error'] == resp['X-Sentry-Error']
@mock.patch('sentry.web.api.is_valid_origin', mock.Mock(return_value=False))
def test_options_response_with_invalid_referrer(self):
resp = self.client.options(self.path, HTTP_REFERER='http://foo.com')
assert resp.status_code == 403, resp.content
self.assertIn('Access-Control-Allow-Origin', resp)
self.assertEquals(resp['Access-Control-Allow-Origin'], '*')
self.assertIn('X-Sentry-Error', resp)
assert resp['X-Sentry-Error'] == "Invalid origin: http://foo.com"
assert json.loads(resp.content)['error'] == resp['X-Sentry-Error']
@mock.patch('sentry.web.api.is_valid_origin', mock.Mock(return_value=True))
def test_options_response_with_valid_origin(self):
resp = self.client.options(self.path, HTTP_ORIGIN='http://foo.com')
assert resp.status_code == 200, resp.content
self.assertIn('Access-Control-Allow-Origin', resp)
self.assertEquals(resp['Access-Control-Allow-Origin'], 'http://foo.com')
@mock.patch('sentry.web.api.is_valid_origin', mock.Mock(return_value=True))
def test_options_response_with_valid_referrer(self):
resp = self.client.options(self.path, HTTP_REFERER='http://foo.com')
assert resp.status_code == 200, resp.content
self.assertIn('Access-Control-Allow-Origin', resp)
self.assertEquals(resp['Access-Control-Allow-Origin'], 'http://foo.com')
@mock.patch('sentry.coreapi.ClientApiHelper.insert_data_to_database')
def test_scrubs_ip_address(self, mock_insert_data_to_database):
self.project.update_option('sentry:scrub_ip_address', True)
body = {
"message": "foo bar",
"sentry.interfaces.User": {"ip_address": "127.0.0.1"},
"sentry.interfaces.Http": {
"method": "GET",
"url": "http://example.com/",
"env": {"REMOTE_ADDR": "127.0.0.1"}
},
}
resp = self._postWithHeader(body)
assert resp.status_code == 200, resp.content
call_data = mock_insert_data_to_database.call_args[0][0]
assert not call_data['sentry.interfaces.User'].get('ip_address')
assert not call_data['sentry.interfaces.Http']['env'].get('REMOTE_ADDR')
class CrossDomainXmlTest(TestCase):
@fixture
def path(self):
return reverse('sentry-api-crossdomain-xml', kwargs={'project_id': self.project.id})
@mock.patch('sentry.web.api.get_origins')
def test_output_with_global(self, get_origins):
get_origins.return_value = '*'
resp = self.client.get(self.path)
get_origins.assert_called_once_with(self.project)
assert resp.status_code == 200, resp.content
self.assertEquals(resp['Content-Type'], 'application/xml')
self.assertTemplateUsed(resp, 'sentry/crossdomain.xml')
assert '<allow-access-from domain="*" secure="false" />' in resp.content
@mock.patch('sentry.web.api.get_origins')
def test_output_with_whitelist(self, get_origins):
get_origins.return_value = ['disqus.com', 'www.disqus.com']
resp = self.client.get(self.path)
get_origins.assert_called_once_with(self.project)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/xml')
self.assertTemplateUsed(resp, 'sentry/crossdomain.xml')
assert '<allow-access-from domain="disqus.com" secure="false" />' in resp.content
assert '<allow-access-from domain="www.disqus.com" secure="false" />' in resp.content
@mock.patch('sentry.web.api.get_origins')
def test_output_with_no_origins(self, get_origins):
get_origins.return_value = []
resp = self.client.get(self.path)
get_origins.assert_called_once_with(self.project)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/xml')
self.assertTemplateUsed(resp, 'sentry/crossdomain.xml')
assert '<allow-access-from' not in resp.content
def test_output_allows_x_sentry_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/xml')
self.assertTemplateUsed(resp, 'sentry/crossdomain.xml')
assert '<allow-http-request-headers-from domain="*" headers="*" secure="false" />' in resp.content
class CrossDomainXmlIndexTest(TestCase):
@fixture
def path(self):
return reverse('sentry-api-crossdomain-xml-index')
def test_permits_policies(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/xml')
self.assertTemplateUsed(resp, 'sentry/crossdomain_index.xml')
assert '<site-control permitted-cross-domain-policies="all" />' in resp.content
class SearchUsersTest(TestCase):
@fixture
def path(self):
return reverse('sentry-api-search-users', args=[self.organization.slug])
def setUp(self):
super(SearchUsersTest, self).setUp()
self.login_as(self.user)
def test_finds_users_from_organization_members(self):
otheruser = User.objects.create(first_name='Bob Ross', username='bobross', email='bob@example.com')
OrganizationMember.objects.create(
organization=self.team.organization,
user=otheruser,
)
resp = self.client.get(self.path, {'query': 'bob'})
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
assert json.loads(resp.content) == {
'results': [{
'id': otheruser.id,
'first_name': otheruser.first_name,
'username': otheruser.username,
'email': otheruser.email,
}],
'query': 'bob',
}
def test_does_not_include_users_who_are_not_members(self):
User.objects.create(first_name='Bob Ross', username='bobross', email='bob@example.com')
resp = self.client.get(self.path, {'query': 'bob'})
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
assert json.loads(resp.content) == {
'results': [],
'query': 'bob',
}
class SearchProjectsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-api-search-projects', args=[self.organization.slug])
def setUp(self):
super(SearchProjectsTest, self).setUp()
self.login_as(self.user)
def test_finds_projects_from_org(self):
project = self.create_project(
organization=self.organization,
team=self.team,
name='Sample',
)
resp = self.client.get(self.path, {'query': 'sample'})
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
assert json.loads(resp.content) == {
'results': [{
'id': project.id,
'slug': project.slug,
'name': project.name,
}],
'query': 'sample',
}
def test_does_not_include_projects_from_other_organizations(self):
org = self.create_organization(owner=self.user, name='Sample')
team = self.create_team(organization=org, name='Sample')
self.create_project(organization=org, team=team, name='Sample')
resp = self.client.get(self.path, {'query': 'sample'})
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
assert json.loads(resp.content) == {
'results': [],
'query': 'sample',
}
|
cratuki/solent
|
refs/heads/master
|
solent/pyinstaller/__init__.py
|
2
|
# // license
# Copyright 2016, Free Software Foundation.
#
# This file is part of Solent.
#
# Solent is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Solent is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Solent. If not, see <http://www.gnu.org/licenses/>.
import os
import glob
def add_resource_from_relpath(mydir):
def rec_glob(p, files):
for d in glob.glob(p):
if os.path.isfile(d):
files.append(d)
rec_glob("%s/*" % d, files)
files = []
rec_glob("%s/*" % mydir, files)
extra_datas = []
for f in files:
tpl = (f, f, 'DATA')
extra_datas.append(tpl)
print('Added %s'%(str(tpl)))
return extra_datas
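# A hedged usage sketch: inside a PyInstaller .spec file the (name, path,
# 'DATA') tuples produced above can extend the Analysis datas; the
# directory name is hypothetical.
#   extra_datas = add_resource_from_relpath('solent/static')
#   a.datas += extra_datas  # after a = Analysis(...), inside the spec file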
|
rockychen-dpaw/borgcollector
|
refs/heads/master
|
livelayermanager/forms.py
|
2
|
from django import forms
from tablemanager.models import Workspace
from livelayermanager.models import Datasource,Layer,SqlViewLayer
from borg_utils.form_fields import GeoserverSettingForm,MetaTilingFactorField,GridSetField
from borg_utils.form_fields import GroupedModelChoiceField,BorgSelect
from borg_utils.forms import BorgModelForm
class DatasourceForm(BorgModelForm,GeoserverSettingForm):
"""
A form for Datasource model
"""
max_connections = forms.IntegerField(label="Max concurrent connections",initial=10,min_value=1,max_value=128)
max_connections.setting_type = "geoserver_setting"
max_connections.key = "max connections"
connect_timeout = forms.IntegerField(label="Connect timeout in seconds",initial=30,min_value=1,max_value=3600)
connect_timeout.setting_type = "geoserver_setting"
connect_timeout.key = "Connection timeout"
min_connections = forms.IntegerField(label="Min concurrent connections",initial=1,min_value=1,max_value=128)
min_connections.setting_type = "geoserver_setting"
min_connections.key = "min connections"
max_connection_idle_time = forms.IntegerField(label="Max connection idle time",initial=300,min_value=1)
max_connection_idle_time.setting_type = "geoserver_setting"
max_connection_idle_time.key = "Max connection idle time"
fetch_size = forms.IntegerField(label="Fetch size",initial=1000,min_value=1)
fetch_size.setting_type = "geoserver_setting"
fetch_size.key = "fetch size"
workspace = GroupedModelChoiceField('publish_channel',queryset=Workspace.objects.all(),required=True,choice_family="workspace",choice_name="workspace_choices",widget=BorgSelect())
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
super(DatasourceForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['workspace'].widget.attrs['readonly'] = True
def _post_clean(self):
if self.errors:
return
self.set_setting_to_model()
super(DatasourceForm,self)._post_clean()
class Meta:
model = Datasource
fields = "__all__"
class LayerForm(BorgModelForm,GeoserverSettingForm):
"""
A form for Layer model
"""
create_cache_layer = forms.BooleanField(required=False,label="create_cache_layer",initial={"enabled":True})
create_cache_layer.setting_type = "geoserver_setting"
server_cache_expire = forms.IntegerField(label="server_cache_expire",min_value=0,required=False,initial=0,help_text="Expire server cache after n seconds (set to 0 to use source setting)")
server_cache_expire.setting_type = "geoserver_setting"
client_cache_expire = forms.IntegerField(label="client_cache_expire",min_value=0,required=False,initial=0,help_text="Expire client cache after n seconds (set to 0 to use source setting)")
client_cache_expire.setting_type = "geoserver_setting"
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
super(LayerForm, self).__init__(*args, **kwargs)
self.fields['table'].widget.attrs['readonly'] = True
instance = kwargs.get("instance")
if instance and instance.is_published:
self.fields['name'].widget.attrs['readonly'] = True
def _post_clean(self):
if self.errors:
return
self.set_setting_to_model()
super(LayerForm,self)._post_clean()
class Meta:
model = Layer
fields = "__all__"
class SqlViewLayerForm(BorgModelForm,GeoserverSettingForm):
"""
A form for SqlViewLayer model
"""
create_cache_layer = forms.BooleanField(required=False,label="create_cache_layer",initial={"enabled":True})
create_cache_layer.setting_type = "geoserver_setting"
server_cache_expire = forms.IntegerField(label="server_cache_expire",min_value=0,required=False,initial=0,help_text="Expire server cache after n seconds (set to 0 to use source setting)")
server_cache_expire.setting_type = "geoserver_setting"
client_cache_expire = forms.IntegerField(label="client_cache_expire",min_value=0,required=False,initial=0,help_text="Expire client cache after n seconds (set to 0 to use source setting)")
client_cache_expire.setting_type = "geoserver_setting"
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
super(SqlViewLayerForm, self).__init__(*args, **kwargs)
instance = kwargs.get("instance")
if instance and instance.is_published:
self.fields['name'].widget.attrs['readonly'] = True
def _post_clean(self):
if self.errors:
return
self.set_setting_to_model()
super(SqlViewLayerForm,self)._post_clean()
class Meta:
model = SqlViewLayer
fields = "__all__"
|
mcking49/apache-flask
|
refs/heads/master
|
Python/Lib/lib2to3/fixes/fix_itertools.py
|
178
|
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (e.g. import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in (u'ifilterfalse', u'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
# Replace the node which contains ('.', 'function') with the
# function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
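# A hedged illustration of the rewrites this fixer performs when driven
# through the stock 2to3 entry point (the target file name is hypothetical):
#   itertools.imap(f, xs)          ->  map(f, xs)
#   itertools.ifilterfalse(f, xs)  ->  itertools.filterfalse(f, xs)
def _example_run_fixer():
    from lib2to3.main import main
    main('lib2to3.fixes', ['--fix', 'itertools', 'example.py'])  # prints diff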
|
t794104/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/junos/junos_netconf.py
|
55
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_netconf
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Configures the Junos Netconf system service
description:
- This module provides an abstraction that enables and configures
the netconf system service running on Junos devices. This module
can be used to easily enable the Netconf API. Netconf provides
a programmatic interface for working with configuration and state
    resources as defined in RFC 6242. If C(netconf_port) is not specified
    in the task, netconf will be enabled on the default port 830 only.
extends_documentation_fragment: junos
options:
netconf_port:
description:
- This argument specifies the port the netconf service should
listen on for SSH connections. The default port as defined
in RFC 6242 is 830.
required: false
default: 830
aliases: ['listens_on']
version_added: "2.2"
state:
description:
- Specifies the state of the C(junos_netconf) resource on
the remote device. If the I(state) argument is set to
I(present) the netconf service will be configured. If the
I(state) argument is set to I(absent) the netconf service
will be removed from the configuration.
required: false
default: present
choices: ['present', 'absent']
notes:
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(network_cli). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
  - If the C(netconf_port) value is not specified in the task, netconf will be enabled on port 830 by default.
    Although the C(netconf_port) value can be from 1 through 65535, avoid configuring access on a port
that is normally assigned for another service. This practice avoids potential resource conflicts.
"""
EXAMPLES = """
- name: enable netconf service on port 830
junos_netconf:
listens_on: 830
state: present
- name: disable netconf service
junos_netconf:
state: absent
"""
RETURN = """
commands:
description: Returns the command sent to the remote device
returned: when changed is True
type: str
sample: 'set system services netconf ssh port 830'
"""
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_connection
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.six import iteritems
USE_PERSISTENT_CONNECTION = True
def map_obj_to_commands(updates, module):
want, have = updates
commands = list()
if want['state'] == 'absent':
if have['state'] == 'present':
commands.append('delete system services netconf')
else:
if have['state'] == 'absent' or want['netconf_port'] != have.get('netconf_port'):
commands.append(
'set system services netconf ssh port %s' % want['netconf_port']
)
return commands
def parse_port(config):
match = re.search(r'port (\d+)', config)
if match:
return int(match.group(1))
def map_config_to_obj(module):
conn = get_connection(module)
out = conn.get(command='show configuration system services netconf')
if out is None:
module.fail_json(msg='unable to retrieve current config')
config = str(out).strip()
obj = {'state': 'absent'}
if 'ssh' in config:
obj.update({
'state': 'present',
'netconf_port': parse_port(config)
})
return obj
def validate_netconf_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='netconf_port must be between 1 and 65535')
def map_params_to_obj(module):
obj = {
'netconf_port': module.params['netconf_port'],
'state': module.params['state']
}
for key, value in iteritems(obj):
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(value, module)
return obj
def load_config(module, config, commit=False):
conn = get_connection(module)
try:
resp = conn.edit_config(to_list(config) + ['top'])
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
diff = resp.get('diff', '')
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
return to_text(diff, errors='surrogate_then_replace').strip()
def main():
"""main entry point for module execution
"""
argument_spec = dict(
netconf_port=dict(type='int', default=830, aliases=['listens_on']),
state=dict(default='present', choices=['present', 'absent']),
)
argument_spec.update(junos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
diff = load_config(module, commands, commit=commit)
if diff:
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
module.exit_json(**result)
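# A minimal sketch, runnable without a device, of the idempotency logic in
# map_obj_to_commands above: the command list stays empty once want and
# have already match.
def _example_idempotency():
    want = {'state': 'present', 'netconf_port': 830}
    print(map_obj_to_commands((want, {'state': 'absent'}), module=None))
    # -> ['set system services netconf ssh port 830']
    print(map_obj_to_commands((want, dict(want)), module=None))  # -> []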
if __name__ == '__main__':
main()
|
jss-emr/openerp-7-src
|
refs/heads/master
|
openerp/addons/account_payment/wizard/account_payment_populate_statement.py
|
40
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class account_payment_populate_statement(osv.osv_memory):
_name = "account.payment.populate.statement"
_description = "Account Payment Populate Statement"
_columns = {
'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
line_obj = self.pool.get('payment.line')
res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
line_ids = line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('bank_statement_line_id', '=', False),
('move_line_id.state','=','valid')])
line_ids.extend(line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('order_id.mode', '=', False),
('move_line_id.state','=','valid')]))
domain = '[("id", "in", '+ str(line_ids)+')]'
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='lines']")
for node in nodes:
node.set('domain', domain)
res['arch'] = etree.tostring(doc)
return res
def populate_statement(self, cr, uid, ids, context=None):
line_obj = self.pool.get('payment.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
voucher_obj = self.pool.get('account.voucher')
voucher_line_obj = self.pool.get('account.voucher.line')
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
data = self.read(cr, uid, ids, [], context=context)[0]
line_ids = data['lines']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
statement = statement_obj.browse(cr, uid, context['active_id'], context=context)
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
            ctx['date'] = line.ml_maturity_date  # was value_date earlier, but that field no longer exists
amount = currency_obj.compute(cr, uid, line.currency.id,
statement.currency.id, line.amount_currency, context=ctx)
if not line.move_line_id.id:
continue
context.update({'move_line_ids': [line.move_line_id.id]})
result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context)
if line.move_line_id:
voucher_res = {
'type': 'payment',
'name': line.name,
'partner_id': line.partner_id.id,
'journal_id': statement.journal_id.id,
'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id),
'company_id': statement.company_id.id,
'currency_id': statement.currency.id,
'date': line.date or time.strftime('%Y-%m-%d'),
'amount': abs(amount),
'period_id': statement.period_id.id,
}
voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context)
voucher_line_dict = {}
for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:
move_line = move_line_obj.browse(cr, uid, line_dict['move_line_id'], context)
if line.move_line_id.move_id.id == move_line.move_id.id:
voucher_line_dict = line_dict
if voucher_line_dict:
voucher_line_dict.update({'voucher_id': voucher_id})
voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)
st_line_id = statement_line_obj.create(cr, uid, {
'name': line.order_id.reference or '?',
'amount': - amount,
'type': 'supplier',
'partner_id': line.partner_id.id,
'account_id': line.move_line_id.account_id.id,
'statement_id': statement.id,
'ref': line.communication,
'voucher_id': voucher_id,
}, context=context)
line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
return {'type': 'ir.actions.act_window_close'}
account_payment_populate_statement()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dufresnedavid/hr
|
refs/heads/8.0
|
__unported__/hr_labour_recruitment/__openerp__.py
|
18
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'New Employee Recruitment and Personnel Requests',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
Recruitment of New Employees
============================
""",
'author': "Michael Telahun Makonnen <mmakonnen@gmail.com>,Odoo Community Association (OCA)",
'website': 'http://miketelahun.wordpress.com',
'license': 'AGPL-3',
'depends': [
'hr_contract',
'hr_contract_state',
'hr_recruitment',
'hr_security',
],
'data': [
'security/ir.model.access.csv',
'hr_recruitment_data.xml',
'hr_recruitment_view.xml',
'hr_recruitment_workflow.xml',
],
'test': [
],
'installable': False,
}
|
MontmereLimited/django-lean
|
refs/heads/master
|
django_lean/lean_retention/tests/__init__.py
|
2
|
import os
import types
import unittest
for filename in os.listdir(os.path.dirname(__file__)):
if (filename[-3:] == ".py" and
filename != "__init__.py" and
filename[0] != '.'):
module = __import__('.'.join((__name__, filename[:-3])), (), (), ["*"])
for name in dir(module):
function = getattr(module, name)
if (isinstance(function, types.TypeType) and
issubclass(function, unittest.TestCase)):
globals()[name] = function
|
Kagami/kisa
|
refs/heads/master
|
lib/twisted/trial/test/test_test_visitor.py
|
90
|
from twisted.trial import unittest
from twisted.trial.runner import TestSuite, suiteVisit
pyunit = __import__('unittest')
class MockVisitor(object):
def __init__(self):
self.calls = []
def __call__(self, testCase):
self.calls.append(testCase)
class TestTestVisitor(unittest.TestCase):
def setUp(self):
self.visitor = MockVisitor()
def test_visitCase(self):
"""
Test that C{visit} works for a single test case.
"""
testCase = TestTestVisitor('test_visitCase')
testCase.visit(self.visitor)
self.assertEqual(self.visitor.calls, [testCase])
def test_visitSuite(self):
"""
Test that C{visit} hits all tests in a suite.
"""
tests = [TestTestVisitor('test_visitCase'),
TestTestVisitor('test_visitSuite')]
testSuite = TestSuite(tests)
testSuite.visit(self.visitor)
self.assertEqual(self.visitor.calls, tests)
def test_visitEmptySuite(self):
"""
Test that C{visit} on an empty suite hits nothing.
"""
TestSuite().visit(self.visitor)
self.assertEqual(self.visitor.calls, [])
def test_visitNestedSuite(self):
"""
Test that C{visit} recurses through suites.
"""
tests = [TestTestVisitor('test_visitCase'),
TestTestVisitor('test_visitSuite')]
testSuite = TestSuite([TestSuite([test]) for test in tests])
testSuite.visit(self.visitor)
self.assertEqual(self.visitor.calls, tests)
def test_visitPyunitSuite(self):
"""
Test that C{suiteVisit} visits stdlib unittest suites
"""
test = TestTestVisitor('test_visitPyunitSuite')
suite = pyunit.TestSuite([test])
suiteVisit(suite, self.visitor)
self.assertEqual(self.visitor.calls, [test])
def test_visitPyunitCase(self):
"""
Test that a stdlib test case in a suite gets visited.
"""
class PyunitCase(pyunit.TestCase):
def test_foo(self):
pass
test = PyunitCase('test_foo')
TestSuite([test]).visit(self.visitor)
self.assertEqual(
[call.id() for call in self.visitor.calls], [test.id()])
|
suneeshtr/persona
|
refs/heads/master
|
node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/test/relative/gyptest-default.py
|
336
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default', formats=['msvs'])
# Run from down in foo.
test.run_gyp('a.gyp', chdir='foo/a')
sln = test.workpath('foo/a/a.sln')
sln_data = open(sln, 'rb').read()
vcproj = sln_data.count('b.vcproj')
vcxproj = sln_data.count('b.vcxproj')
if (vcproj, vcxproj) not in [(1, 0), (0, 1)]:
test.fail_test()
test.pass_test()
|
q1ang/scikit-learn
|
refs/heads/master
|
examples/ensemble/plot_forest_importances.py
|
241
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining ones are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
dimacus/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/common/form_handling_tests.py
|
65
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import unittest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
class FormHandlingTests(unittest.TestCase):
def testShouldClickOnSubmitInputElements(self):
self._loadPage("formPage")
self.driver.find_element_by_id("submitButton").click()
self.driver.implicitly_wait(5)
self.assertEqual(self.driver.title, "We Arrive Here")
def testClickingOnUnclickableElementsDoesNothing(self):
self._loadPage("formPage")
self.driver.find_element_by_xpath("//body").click()
def testShouldBeAbleToClickImageButtons(self):
self._loadPage("formPage")
self.driver.find_element_by_id("imageButton").click()
self.driver.implicitly_wait(5)
self.assertEqual(self.driver.title, "We Arrive Here")
def testShouldBeAbleToSubmitForms(self):
self._loadPage("formPage")
self.driver.find_element_by_name("login").submit()
self.driver.implicitly_wait(5)
self.assertEqual(self.driver.title, "We Arrive Here")
def testShouldSubmitAFormWhenAnyInputElementWithinThatFormIsSubmitted(self):
self._loadPage("formPage")
self.driver.find_element_by_id("checky").submit()
self.driver.implicitly_wait(5)
self.assertEqual(self.driver.title, "We Arrive Here")
def testShouldSubmitAFormWhenAnyElementWihinThatFormIsSubmitted(self):
self._loadPage("formPage")
self.driver.find_element_by_xpath("//form/p").submit()
self.driver.implicitly_wait(5)
self.assertEqual(self.driver.title, "We Arrive Here")
def testShouldNotBeAbleToSubmitAFormThatDoesNotExist(self):
self._loadPage("formPage")
try:
self.driver.find_element_by_name("there is no spoon").submit()
self.fail("Expected NoSuchElementException to have been thrown")
except NoSuchElementException as e:
pass
except Exception as e:
self.fail("Expected NoSuchElementException but got " + str(e))
def testShouldBeAbleToEnterTextIntoATextAreaBySettingItsValue(self):
self._loadPage("javascriptPage")
textarea = self.driver.find_element_by_id("keyUpArea")
cheesey = "Brie and cheddar"
textarea.send_keys(cheesey)
self.assertEqual(textarea.get_attribute("value"), cheesey)
def testShouldEnterDataIntoFormFields(self):
self._loadPage("xhtmlTest")
element = self.driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
originalValue = element.get_attribute("value")
self.assertEqual(originalValue, "change")
element.clear()
element.send_keys("some text")
element = self.driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
newFormValue = element.get_attribute("value")
self.assertEqual(newFormValue, "some text")
def testShouldBeAbleToSelectACheckBox(self):
self._loadPage("formPage")
checkbox = self.driver.find_element_by_id("checky")
self.assertEqual(checkbox.is_selected(), False)
checkbox.click()
self.assertEqual(checkbox.is_selected(), True)
checkbox.click()
self.assertEqual(checkbox.is_selected(), False)
def testShouldToggleTheCheckedStateOfACheckbox(self):
self._loadPage("formPage")
checkbox = self.driver.find_element_by_id("checky")
self.assertEqual(checkbox.is_selected(), False)
checkbox.click()
self.assertEqual(checkbox.is_selected(), True)
checkbox.click()
self.assertEqual(checkbox.is_selected(), False)
def testTogglingACheckboxShouldReturnItsCurrentState(self):
self._loadPage("formPage")
checkbox = self.driver.find_element_by_id("checky")
self.assertEqual(checkbox.is_selected(), False)
checkbox.click()
self.assertEqual(checkbox.is_selected(), True)
checkbox.click()
self.assertEqual(checkbox.is_selected(), False)
def testShouldBeAbleToSelectARadioButton(self):
self._loadPage("formPage")
radioButton = self.driver.find_element_by_id("peas")
self.assertEqual(radioButton.is_selected(), False)
radioButton.click()
self.assertEqual(radioButton.is_selected(), True)
def testShouldBeAbleToSelectARadioButtonByClickingOnIt(self):
self._loadPage("formPage")
radioButton = self.driver.find_element_by_id("peas")
self.assertEqual(radioButton.is_selected(), False)
radioButton.click()
self.assertEqual(radioButton.is_selected(), True)
def testShouldReturnStateOfRadioButtonsBeforeInteraction(self):
self._loadPage("formPage")
radioButton = self.driver.find_element_by_id("cheese_and_peas")
self.assertEqual(radioButton.is_selected(), True)
radioButton = self.driver.find_element_by_id("cheese")
self.assertEqual(radioButton.is_selected(), False)
# [ExpectedException(typeof(NotImplementedException))]
#def testShouldThrowAnExceptionWhenTogglingTheStateOfARadioButton(self):
# self._loadPage("formPage")
# radioButton = self.driver.find_element_by_id("cheese"))
# radioButton.click()
# [IgnoreBrowser(Browser.IE, "IE allows toggling of an option not in a multiselect")]
# [ExpectedException(typeof(NotImplementedException))]
#def testTogglingAnOptionShouldThrowAnExceptionIfTheOptionIsNotInAMultiSelect(self):
# self._loadPage("formPage")
#
# select = self.driver.find_element_by_name("selectomatic"))
# option = select.find_elements_by_tag_name("option"))[0]
# option.click()
def testTogglingAnOptionShouldToggleOptionsInAMultiSelect(self):
if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
pytest.skip("deselecting preselected values only works on chrome >= 16")
self._loadPage("formPage")
select = self.driver.find_element_by_name("multi")
option = select.find_elements_by_tag_name("option")[0]
selected = option.is_selected()
option.click()
self.assertFalse(selected == option.is_selected())
option.click()
self.assertTrue(selected == option.is_selected())
def testShouldThrowAnExceptionWhenSelectingAnUnselectableElement(self):
self._loadPage("formPage")
element = self.driver.find_element_by_xpath("//title")
try:
element.click()
self.fail("Expected WebDriverException to have been thrown")
except WebDriverException as e:
pass
except Exception as e:
self.fail("Expected WebDriverException but got " + str(type(e)))
def testSendingKeyboardEventsShouldAppendTextInInputs(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("working")
element.send_keys("Some")
value = element.get_attribute("value")
self.assertEqual(value, "Some")
element.send_keys(" text")
value = element.get_attribute("value")
self.assertEqual(value, "Some text")
def testShouldBeAbleToClearTextFromInputElements(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("working")
element.send_keys("Some text")
value = element.get_attribute("value")
self.assertTrue(len(value) > 0)
element.clear()
value = element.get_attribute("value")
self.assertEqual(len(value), 0)
def testEmptyTextBoxesShouldReturnAnEmptyStringNotNull(self):
self._loadPage("formPage")
emptyTextBox = self.driver.find_element_by_id("working")
self.assertEqual(emptyTextBox.get_attribute("value"), "")
emptyTextArea = self.driver.find_element_by_id("emptyTextArea")
self.assertEqual(emptyTextArea.get_attribute("value"), "")
def testShouldBeAbleToClearTextFromTextAreas(self):
self._loadPage("formPage")
element = self.driver.find_element_by_id("withText")
element.send_keys("Some text")
value = element.get_attribute("value")
self.assertTrue(len(value) > 0)
element.clear()
value = element.get_attribute("value")
self.assertEqual(len(value), 0)
def testRadioShouldNotBeSelectedAfterSelectingSibling(self):
self._loadPage("formPage")
cheese = self.driver.find_element_by_id("cheese")
peas = self.driver.find_element_by_id("peas")
cheese.click()
self.assertEqual(True, cheese.is_selected())
self.assertEqual(False, peas.is_selected())
peas.click()
self.assertEqual(False, cheese.is_selected())
self.assertEqual(True, peas.is_selected())
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
Shaps/ansible
|
refs/heads/devel
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py
|
6
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class FilterModule(object):
def filters(self):
return {
'broken': lambda x: 'broken',
}
raise Exception('This is a broken filter plugin')
|
BaySchoolCS2/Golem
|
refs/heads/master
|
golemDaemon.py
|
1
|
from ConfigParser import ConfigParser
from golemUtils import breakUp
from WPAW import Wrapper
from time import sleep
from tinydb import TinyDB, where
db = TinyDB('db.json')
badWords = db.table('badWords')
badPhrases2 = db.table('badPhrases2')
badPhrases3 = db.table('badPhrases3')
goodWords = db.table('goodWords')
goodPhrases2 = db.table('goodPhrases2')
goodPhrases3 = db.table('goodPhrases3')
config = ConfigParser()
config.readfp(open('config.ini'))
golem = Wrapper(user_agent="Clay Golem")
golem.token = config.get('Default','api_key')
done = []
while True:
words = []
phrase_2 = []
phrase_3 = []
golem.get_posts()
posts = golem.posts
for post in posts:
if post not in done:
words, phrase_2, phrase_3 = breakUp(post["content"])
|
prometheanfire/portage
|
refs/heads/master
|
pym/portage/util/_eventloop/__init__.py
|
70
|
# Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
|
cannabiscoindev/cannabiscoin420
|
refs/heads/master-0.13
|
qa/rpc-tests/httpbasics.py
|
106
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rpc http basics
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #connection must still be open because bitcoind uses keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest().main()
|
hgl888/crosswalk-efl
|
refs/heads/efl/crosswalk-14/42.0.2311.39
|
tools/build/android/generate_version_code.py
|
39
|
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
versionCode is needed for AndroidManifest.xml.
This script generates the version code from the
VERSION file for xwalk's runtime library apk.
"""
import optparse
import sys
def fetch_values_from_file(values_dict, file_name):
"""
Fetches KEYWORD=VALUE settings from the specified file.
Everything to the left of the first '=' is the keyword,
everything to the right is the value. No stripping of
white space, so beware.
The file must exist, otherwise you get the Python exception from open().
"""
for line in open(file_name, 'r').readlines():
key, val = line.rstrip('\r\n').split('=', 1)
if key in ['MAJOR', 'MINOR', 'BUILD', 'PATCH']:
try:
values_dict[key] = int(val)
except ValueError:
return False
return True
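# Example (hypothetical values): a VERSION file containing
#   MAJOR=12
#   MINOR=41
#   BUILD=3
#   PATCH=7
# populates values_dict as {'MAJOR': 12, 'MINOR': 41, 'BUILD': 3, 'PATCH': 7}.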
def calculate_version_code(values_dict, shift):
"""
Version code is calculated from the four version integers.
Major is for crosswalk's large updates, and minor is for the Chromium base.
Major and minor only ever increase, so their sum is enough.
On each major or minor refresh, build resets to 0. After that, build
increases for 6 weeks (12 weeks if we skip one upstream beta rebasing),
so 100 numbers for build are enough.
After we branch from trunk, patch increases for the rest of that branch's
life; 100 numbers are also enough since a branch lasts at most 24 weeks,
and version bumps are much less frequent after the branch point.
Shift is the last digit, used for the different configurations we want to
upload to the Play Store.
"""
try:
major = values_dict['MAJOR']
minor = values_dict['MINOR']
build = values_dict['BUILD']
patch = values_dict['PATCH']
return (major + minor) * 100000 +\
build * 1000 +\
patch * 10 +\
shift
except KeyError:
return 0
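# Worked example (hypothetical values): MAJOR=12, MINOR=41, BUILD=3, PATCH=7
# with shift=1 gives (12 + 41) * 100000 + 3 * 1000 + 7 * 10 + 1 = 5303071.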
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--file', '-f', default='VERSION',
help='Path to the version file.')
option_parser.add_option('--shift', '-s', default=0, type='int',
help='Shift for different configurations, PlayStore '
'requires different version code for multiple apks')
options, _ = option_parser.parse_args()
versions = {}
version_code = 0
if fetch_values_from_file(versions, options.file):
version_code = calculate_version_code(versions, options.shift)
print '%d' % version_code
if version_code == 0:
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
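# Example invocation (hypothetical): generate_version_code.py --file VERSION --shift 1
# prints the computed code; a malformed VERSION file yields 0 and exit status 1.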
|
exabon/godot
|
refs/heads/master
|
methods.py
|
5
|
import os
def add_source_files(self, sources, filetype, lib_env=None, shared=False):
import glob
import string
# if not lib_objects:
if not lib_env:
lib_env = self
if type(filetype) == type(""):
dir = self.Dir('.').abspath
list = glob.glob(dir + "/" + filetype)
for f in list:
sources.append(self.Object(f))
else:
for f in filetype:
sources.append(self.Object(f))
def build_shader_header(target, source, env):
for x in source:
print x
name = str(x)
name = name[name.rfind("/") + 1:]
name = name[name.rfind("\\") + 1:]
name = name.replace(".", "_")
fs = open(str(x), "r")
fd = open(str(x) + ".h", "w")
fd.write("/* this file has been generated by SCons, do not edit! */\n")
fd.write("static const char *" + name + "=\n")
line = fs.readline()
while(line):
line = line.replace("\r", "")
line = line.replace("\n", "")
line = line.replace("\\", "\\\\")
line = line.replace("\"", "\\\"")
fd.write("\"" + line + "\\n\"\n")
line = fs.readline()
fd.write(";\n")
return 0
def build_glsl_header(filename):
fs = open(filename, "r")
line = fs.readline()
vertex_lines = []
fragment_lines = []
uniforms = []
attributes = []
fbos = []
conditionals = []
texunits = []
texunit_names = []
ubos = []
ubo_names = []
reading = ""
line_offset = 0
vertex_offset = 0
fragment_offset = 0
while(line):
if (line.find("[vertex]") != -1):
reading = "vertex"
line = fs.readline()
line_offset += 1
vertex_offset = line_offset
continue
if (line.find("[fragment]") != -1):
reading = "fragment"
line = fs.readline()
line_offset += 1
fragment_offset = line_offset
continue
if (line.find("#ifdef ") != -1):
ifdefline = line.replace("#ifdef ", "").strip()
if (not ifdefline in conditionals):
conditionals += [ifdefline]
if (line.find("#elif defined(") != -1):
ifdefline = line.replace("#elif defined(", "").strip()
ifdefline = ifdefline.replace(")", "").strip()
if (not ifdefline in conditionals):
conditionals += [ifdefline]
import re
if re.search(r"^\s*uniform", line):
if (line.lower().find("texunit:") != -1):
# texture unit
texunit = str(int(line[line.find(":") + 1:].strip()))
uline = line[:line.lower().find("//")]
uline = uline.replace("uniform", "")
uline = uline.replace(";", "")
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in texunit_names):
texunits += [(x, texunit)]
texunit_names += [x]
elif (line.lower().find("ubo:") != -1):
# ubo
uboidx = str(int(line[line.find(":") + 1:].strip()))
uline = line[:line.lower().find("//")]
uline = uline[uline.find("uniform") + len("uniform"):]
uline = uline.replace(";", "")
uline = uline.replace("{", "").strip()
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in ubo_names):
ubos += [(x, uboidx)]
ubo_names += [x]
else:
uline = line.replace("uniform", "")
uline = uline.replace(";", "")
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in uniforms):
uniforms += [x]
if ((line.strip().find("in ") == 0 or line.strip().find("attribute ") == 0) and line.find("attrib:") != -1):
uline = line.replace("in ", "")
uline = uline.replace("attribute ", "")
uline = uline.replace(";", "")
uline = uline[uline.find(" "):].strip()
if (uline.find("//") != -1):
name, bind = uline.split("//")
if (bind.find("attrib:") != -1):
name = name.strip()
bind = bind.replace("attrib:", "").strip()
attributes += [(name, bind)]
if (line.strip().find("out ") == 0):
uline = line.replace("out", "").strip()
uline = uline.replace(";", "")
uline = uline[uline.find(" "):].strip()
if (uline.find("//") != -1):
name, bind = uline.split("//")
if (bind.find("drawbuffer:") != -1):
name = name.strip()
bind = bind.replace("drawbuffer:", "").strip()
fbos += [(name, bind)]
line = line.replace("\r", "")
line = line.replace("\n", "")
line = line.replace("\\", "\\\\")
line = line.replace("\"", "\\\"")
# line=line+"\\n\\" no need to anymore
if (reading == "vertex"):
vertex_lines += [line]
if (reading == "fragment"):
fragment_lines += [line]
line = fs.readline()
line_offset += 1
fs.close()
out_file = filename + ".h"
fd = open(out_file, "w")
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file
out_file_base = out_file_base[out_file_base.rfind("/") + 1:]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1:]
# print("out file "+out_file+" base " +out_file_base)
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + "\n")
fd.write("#define " + out_file_ifdef + "\n")
out_file_class = out_file_base.replace(".glsl.h", "").title().replace("_", "").replace(".", "") + "ShaderGL"
fd.write("\n\n")
fd.write("#include \"drivers/opengl/shader_gl.h\"\n\n\n")
fd.write("class " + out_file_class + " : public ShaderGL {\n\n")
fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n")
fd.write("public:\n\n")
if (len(conditionals)):
fd.write("\tenum Conditionals {\n")
for x in conditionals:
fd.write("\t\t" + x + ",\n")
fd.write("\t};\n\n")
if (len(uniforms)):
fd.write("\tenum Uniforms {\n")
for x in uniforms:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n")
if (len(conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n")
fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, bool p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value?1:0); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.basis.elements[0][0],
tr.basis.elements[1][0],
tr.basis.elements[2][0],
0,
tr.basis.elements[0][1],
tr.basis.elements[1][1],
tr.basis.elements[2][1],
0,
tr.basis.elements[0][2],
tr.basis.elements[1][2],
tr.basis.elements[2][2],
0,
tr.origin.x,
tr.origin.y,
tr.origin.z,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform2D& p_transform) { _FU
const Transform2D &tr = p_transform;
GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.elements[0][0],
tr.elements[0][1],
0,
0,
tr.elements[1][0],
tr.elements[1][1],
0,
0,
0,
0,
1,
0,
tr.elements[2][0],
tr.elements[2][1],
0,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
GLfloat matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[i][j];
}
}
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}; """)
fd.write("\n\n#undef _FU\n\n\n")
fd.write("\tvirtual void init() {\n\n")
if (len(conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(conditionals)):
for x in conditionals:
fd.write("\t\t\t\"#define " + x + "\\n\",\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(uniforms)):
for x in uniforms:
fd.write("\t\t\t\"" + x + "\",\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
if (len(attributes)):
fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n")
for x in attributes:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n")
if (len(fbos)):
fd.write("\t\tstatic FBOPair _fbo_pairs[]={\n")
for x in fbos:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic FBOPair *_fbo_pairs=NULL;\n")
if (len(ubos)):
fd.write("\t\tstatic UBOPair _ubo_pairs[]={\n")
for x in ubos:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic UBOPair *_ubo_pairs=NULL;\n")
if (len(texunits)):
fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n")
for x in texunits:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n")
fd.write("\t\tstatic const char* _vertex_code=\"\\\n")
for x in vertex_lines:
fd.write("\t\t\t" + x + "\n")
fd.write("\t\t\";\n\n")
fd.write("\t\tstatic const int _vertex_code_start=" + str(vertex_offset) + ";\n")
fd.write("\t\tstatic const char* _fragment_code=\"\\\n")
for x in fragment_lines:
fd.write("\t\t\t" + x + "\n")
fd.write("\t\t\";\n\n")
fd.write("\t\tstatic const int _fragment_code_start=" + str(fragment_offset) + ";\n")
fd.write("\t\tsetup(_conditional_strings," + str(len(conditionals)) + ",_uniform_strings," + str(len(uniforms)) + ",_attribute_pairs," + str(len(attributes)) + ",_fbo_pairs," + str(len(fbos)) + ",_ubo_pairs," + str(len(ubos)) + ",_texunit_pairs," + str(len(texunits)) + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
fd.write("\t};\n\n")
fd.write("};\n\n")
fd.write("#endif\n\n")
fd.close()
def build_glsl_headers(target, source, env):
for x in source:
build_glsl_header(str(x))
return 0
def build_hlsl_dx9_header(filename):
fs = open(filename, "r")
line = fs.readline()
vertex_lines = []
fragment_lines = []
uniforms = []
fragment_uniforms = []
attributes = []
fbos = []
conditionals = []
reading = ""
line_offset = 0
vertex_offset = 0
fragment_offset = 0
while(line):
if (line.find("[vertex]") != -1):
reading = "vertex"
line = fs.readline()
line_offset += 1
vertex_offset = line_offset
continue
if (line.find("[fragment]") != -1):
reading = "fragment"
line = fs.readline()
line_offset += 1
fragment_offset = line_offset
continue
if (line.find("#ifdef ") != -1):
ifdefline = line.replace("#ifdef ", "").strip()
if (not ifdefline in conditionals):
conditionals += [ifdefline]
if (line.find("#elif defined(") != -1):
ifdefline = line.replace("#elif defined(", "").strip()
ifdefline = ifdefline.replace(")", "").strip()
if (not ifdefline in conditionals):
conditionals += [ifdefline]
if (line.find("uniform") != -1):
uline = line.replace("uniform", "")
uline = uline.replace(";", "")
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in uniforms):
uniforms += [x]
fragment_uniforms += [reading == "fragment"]
line = line.replace("\r", "")
line = line.replace("\n", "")
line = line.replace("\\", "\\\\")
line = line.replace("\"", "\\\"")
line = line + "\\n\\"
if (reading == "vertex"):
vertex_lines += [line]
if (reading == "fragment"):
fragment_lines += [line]
line = fs.readline()
line_offset += 1
fs.close()
out_file = filename + ".h"
fd = open(out_file, "w")
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file
out_file_base = out_file_base[out_file_base.rfind("/") + 1:]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1:]
# print("out file "+out_file+" base " +out_file_base)
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + "\n")
fd.write("#define " + out_file_ifdef + "\n")
out_file_class = out_file_base.replace(".hlsl.h", "").title().replace("_", "").replace(".", "") + "ShaderDX9"
fd.write("\n\n")
fd.write("#include \"drivers/directx9/shader_dx9.h\"\n\n\n")
fd.write("class " + out_file_class + " : public ShaderDX9 {\n\n")
fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n")
fd.write("public:\n\n")
if (len(conditionals)):
fd.write("\tenum Conditionals {\n")
for x in conditionals:
fd.write("\t\t" + x + ",\n")
fd.write("\t};\n\n")
if (len(uniforms)):
fd.write("\tenum Uniforms {\n")
for x in uniforms:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
if (len(conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n")
fd.write("\t#define _FU if (!_uniform_valid(p_uniform)) return; ERR_FAIL_COND( get_active()!=this );\n\n ")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, bool p_value) { _FU set_uniformb(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU set_uniformf(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU set_uniformf(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU set_uniformi(p_uniform,p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU float col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; set_uniformfv(p_uniform,col); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU float vec2[4]={p_vec2.x,p_vec2.y,0,0}; set_uniformfv(p_uniform,vec2); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU float vec3[4]={p_vec3.x,p_vec3.y,p_vec3.z,0}; set_uniformfv(p_uniform,vec3); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU float vec2[4]={p_a,p_b,0,0}; set_uniformfv(p_uniform,vec2); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU float vec3[4]={p_a,p_b,p_c,0}; set_uniformfv(p_uniform,vec3); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU float vec4[4]={p_a,p_b,p_c,p_d}; set_uniformfv(p_uniform,vec4); }\n\n")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
float matrix[16]={ /* build a 4x4 matrix */
tr.basis.elements[0][0],
tr.basis.elements[0][1],
tr.basis.elements[0][2],
tr.origin.x,
tr.basis.elements[1][0],
tr.basis.elements[1][1],
tr.basis.elements[1][2],
tr.origin.y,
tr.basis.elements[2][0],
tr.basis.elements[2][1],
tr.basis.elements[2][2],
tr.origin.z,
0,
0,
0,
1
};
set_uniformfv(p_uniform,&matrix[0],4);
}
""")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
float matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[j][i];
}
}
set_uniformfv(p_uniform,&matrix[0],4);
}; """)
fd.write("\n\n#undef _FU\n\n\n")
fd.write("\tvirtual void init(IDirect3DDevice9 *p_device,ShaderSupport p_version) {\n\n")
if (len(conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(conditionals)):
for x in conditionals:
fd.write("\t\t\t\"" + x + "\",\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(uniforms)):
for x in uniforms:
fd.write("\t\t\t\"" + x + "\",\n")
fd.write("\t\t};\n\n")
fd.write("\t\tstatic const bool _fragment_uniforms[]={\n")
if (len(uniforms)):
for x in fragment_uniforms:
if (x):
fd.write("\t\t\ttrue,\n")
else:
fd.write("\t\t\tfalse,\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
fd.write("\t\tstatic const bool *_fragment_uniforms=NULL;\n")
fd.write("\t\tstatic const char* _vertex_code=\"\\\n")
for x in vertex_lines:
fd.write("\t\t\t" + x + "\n")
fd.write("\t\t\";\n\n")
fd.write("\t\tstatic const int _vertex_code_start=" + str(vertex_offset) + ";\n")
fd.write("\t\tstatic const char* _fragment_code=\"\\\n")
for x in fragment_lines:
fd.write("\t\t\t" + x + "\n")
fd.write("\t\t\";\n\n")
fd.write("\t\tstatic const int _fragment_code_start=" + str(fragment_offset) + ";\n")
fd.write("\t\tsetup(p_device,p_version,_conditional_strings," + str(len(conditionals)) + ",_uniform_strings," + str(len(uniforms)) + ",_fragment_uniforms,_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
fd.write("\t};\n\n")
fd.write("};\n\n")
fd.write("#endif\n\n")
fd.close()
def build_hlsl_dx9_headers(target, source, env):
for x in source:
build_hlsl_dx9_header(str(x))
return 0
class LegacyGLHeaderStruct:
def __init__(self):
self.vertex_lines = []
self.fragment_lines = []
self.uniforms = []
self.attributes = []
self.feedbacks = []
self.fbos = []
self.conditionals = []
self.enums = {}
self.texunits = []
self.texunit_names = []
self.ubos = []
self.ubo_names = []
self.vertex_included_files = []
self.fragment_included_files = []
self.reading = ""
self.line_offset = 0
self.vertex_offset = 0
self.fragment_offset = 0
def include_file_in_legacygl_header(filename, header_data, depth):
fs = open(filename, "r")
line = fs.readline()
while(line):
if (line.find("[vertex]") != -1):
header_data.reading = "vertex"
line = fs.readline()
header_data.line_offset += 1
header_data.vertex_offset = header_data.line_offset
continue
if (line.find("[fragment]") != -1):
header_data.reading = "fragment"
line = fs.readline()
header_data.line_offset += 1
header_data.fragment_offset = header_data.line_offset
continue
while(line.find("#include ") != -1):
includeline = line.replace("#include ", "").strip()[1:-1]
import os.path
included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
if (not included_file in header_data.vertex_included_files and header_data.reading == "vertex"):
header_data.vertex_included_files += [included_file]
if(include_file_in_legacygl_header(included_file, header_data, depth + 1) == None):
print "Error in file '" + filename + "': #include " + includeline + "could not be found!"
elif (not included_file in header_data.fragment_included_files and header_data.reading == "fragment"):
header_data.fragment_included_files += [included_file]
if(include_file_in_legacygl_header(included_file, header_data, depth + 1) == None):
print "Error in file '" + filename + "': #include " + includeline + "could not be found!"
line = fs.readline()
if (line.find("#ifdef ") != -1 or line.find("#elif defined(") != -1):
if (line.find("#ifdef ") != -1):
ifdefline = line.replace("#ifdef ", "").strip()
else:
ifdefline = line.replace("#elif defined(", "").strip()
ifdefline = ifdefline.replace(")", "").strip()
if (line.find("_EN_") != -1):
enumbase = ifdefline[:ifdefline.find("_EN_")]
ifdefline = ifdefline.replace("_EN_", "_")
line = line.replace("_EN_", "_")
# print(enumbase+":"+ifdefline);
if (enumbase not in header_data.enums):
header_data.enums[enumbase] = []
if (ifdefline not in header_data.enums[enumbase]):
header_data.enums[enumbase].append(ifdefline)
elif (not ifdefline in header_data.conditionals):
header_data.conditionals += [ifdefline]
if (line.find("uniform") != -1 and line.lower().find("texunit:") != -1):
# texture unit
texunitstr = line[line.find(":") + 1:].strip()
if (texunitstr == "auto"):
texunit = "-1"
else:
texunit = str(int(texunitstr))
uline = line[:line.lower().find("//")]
uline = uline.replace("uniform", "")
uline = uline.replace("highp", "")
uline = uline.replace(";", "")
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in header_data.texunit_names):
header_data.texunits += [(x, texunit)]
header_data.texunit_names += [x]
elif (line.find("uniform") != -1 and line.lower().find("ubo:") != -1):
# uniform buffer object
ubostr = line[line.find(":") + 1:].strip()
ubo = str(int(ubostr))
uline = line[:line.lower().find("//")]
uline = uline[uline.find("uniform") + len("uniform"):]
uline = uline.replace("highp", "")
uline = uline.replace(";", "")
uline = uline.replace("{", "").strip()
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in header_data.ubo_names):
header_data.ubos += [(x, ubo)]
header_data.ubo_names += [x]
elif (line.find("uniform") != -1 and line.find("{") == -1 and line.find(";") != -1):
uline = line.replace("uniform", "")
uline = uline.replace(";", "")
lines = uline.split(",")
for x in lines:
x = x.strip()
x = x[x.rfind(" ") + 1:]
if (x.find("[") != -1):
# uniform array
x = x[:x.find("[")]
if (not x in header_data.uniforms):
header_data.uniforms += [x]
if (line.strip().find("attribute ") == 0 and line.find("attrib:") != -1):
uline = line.replace("in ", "")
uline = uline.replace("attribute ", "")
uline = uline.replace("highp ", "")
uline = uline.replace(";", "")
uline = uline[uline.find(" "):].strip()
if (uline.find("//") != -1):
name, bind = uline.split("//")
if (bind.find("attrib:") != -1):
name = name.strip()
bind = bind.replace("attrib:", "").strip()
header_data.attributes += [(name, bind)]
if (line.strip().find("out ") == 0 and line.find("tfb:") != -1):
uline = line.replace("out ", "")
uline = uline.replace("highp ", "")
uline = uline.replace(";", "")
uline = uline[uline.find(" "):].strip()
if (uline.find("//") != -1):
name, bind = uline.split("//")
if (bind.find("tfb:") != -1):
name = name.strip()
bind = bind.replace("tfb:", "").strip()
header_data.feedbacks += [(name, bind)]
line = line.replace("\r", "")
line = line.replace("\n", "")
# line=line.replace("\\","\\\\")
# line=line.replace("\"","\\\"")
# line=line+"\\n\\"
if (header_data.reading == "vertex"):
header_data.vertex_lines += [line]
if (header_data.reading == "fragment"):
header_data.fragment_lines += [line]
line = fs.readline()
header_data.line_offset += 1
fs.close()
return header_data
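# Example of the source annotations this parser recognizes (hypothetical GLSL snippet):
#   [vertex]
#   uniform highp sampler2D albedo; //texunit:0
#   attribute highp vec2 uv; //attrib:4
#   [fragment]
#   out highp vec3 position; //tfb:position
#   #ifdef USE_LIGHTING_EN_DIRECTIONAL
# Plain uniforms, "//ubo:N" annotations, "#include" directives and
# "#ifdef"/"#elif defined(...)" conditionals are collected into the
# corresponding LegacyGLHeaderStruct fields.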
def build_legacygl_header(filename, include, class_suffix, output_attribs):
header_data = LegacyGLHeaderStruct()
include_file_in_legacygl_header(filename, header_data, 0)
out_file = filename + ".h"
fd = open(out_file, "w")
enum_constants = []
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file
out_file_base = out_file_base[out_file_base.rfind("/") + 1:]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1:]
# print("out file "+out_file+" base " +out_file_base)
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + class_suffix + "_120\n")
fd.write("#define " + out_file_ifdef + class_suffix + "_120\n")
out_file_class = out_file_base.replace(".glsl.h", "").title().replace("_", "").replace(".", "") + "Shader" + class_suffix
fd.write("\n\n")
fd.write("#include \"" + include + "\"\n\n\n")
fd.write("class " + out_file_class + " : public Shader" + class_suffix + " {\n\n")
fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n")
fd.write("public:\n\n")
if (len(header_data.conditionals)):
fd.write("\tenum Conditionals {\n")
for x in header_data.conditionals:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
if (len(header_data.uniforms)):
fd.write("\tenum Uniforms {\n")
for x in header_data.uniforms:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n")
if (len(header_data.conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n")
fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int64_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, unsigned long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
#fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, long p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n");
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.basis.elements[0][0],
tr.basis.elements[1][0],
tr.basis.elements[2][0],
0,
tr.basis.elements[0][1],
tr.basis.elements[1][1],
tr.basis.elements[2][1],
0,
tr.basis.elements[0][2],
tr.basis.elements[1][2],
tr.basis.elements[2][2],
0,
tr.origin.x,
tr.origin.y,
tr.origin.z,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform2D& p_transform) { _FU
const Transform2D &tr = p_transform;
GLfloat matrix[16]={ /* build a 4x4 matrix */
tr.elements[0][0],
tr.elements[0][1],
0,
0,
tr.elements[1][0],
tr.elements[1][1],
0,
0,
0,
0,
1,
0,
tr.elements[2][0],
tr.elements[2][1],
0,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
GLfloat matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[i][j];
}
}
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}; """)
fd.write("\n\n#undef _FU\n\n\n")
fd.write("\tvirtual void init() {\n\n")
enum_value_count = 0
if (len(header_data.enums)):
fd.write("\t\t//Written using math, given nonstandarity of 64 bits integer constants..\n")
fd.write("\t\tstatic const Enum _enums[]={\n")
bitofs = len(header_data.conditionals)
enum_vals = []
for xv in header_data.enums:
x = header_data.enums[xv]
bits = 1
amt = len(x)
# print(x)
while(2**bits < amt):
bits += 1
# print("amount: "+str(amt)+" bits "+str(bits));
strs = "{"
for i in range(amt):
strs += "\"#define " + x[i] + "\\n\","
v = {}
v["set_mask"] = "uint64_t(" + str(i) + ")<<" + str(bitofs)
v["clear_mask"] = "((uint64_t(1)<<40)-1) ^ (((uint64_t(1)<<" + str(bits) + ") - 1)<<" + str(bitofs) + ")"
enum_vals.append(v)
enum_constants.append(x[i])
strs += "NULL}"
fd.write("\t\t\t{(uint64_t(1<<" + str(bits) + ")-1)<<" + str(bitofs) + "," + str(bitofs) + "," + strs + "},\n")
bitofs += bits
fd.write("\t\t};\n\n")
fd.write("\t\tstatic const EnumValue _enum_values[]={\n")
enum_value_count = len(enum_vals)
for x in enum_vals:
fd.write("\t\t\t{" + x["set_mask"] + "," + x["clear_mask"] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const Enum *_enums=NULL;\n")
fd.write("\t\tstatic const EnumValue *_enum_values=NULL;\n")
conditionals_found = []
if (len(header_data.conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(header_data.conditionals)):
for x in header_data.conditionals:
fd.write("\t\t\t\"#define " + x + "\\n\",\n")
conditionals_found.append(x)
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(header_data.uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(header_data.uniforms)):
for x in header_data.uniforms:
fd.write("\t\t\t\"" + x + "\",\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
if output_attribs:
if (len(header_data.attributes)):
fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n")
for x in header_data.attributes:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n")
feedback_count = 0
if (len(header_data.feedbacks)):
fd.write("\t\tstatic const Feedback _feedbacks[]={\n")
for x in header_data.feedbacks:
name = x[0]
cond = x[1]
if (cond in conditionals_found):
fd.write("\t\t\t{\"" + name + "\"," + str(conditionals_found.index(cond)) + "},\n")
else:
fd.write("\t\t\t{\"" + name + "\",-1},\n")
feedback_count += 1
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const Feedback* _feedbacks=NULL;\n")
if (len(header_data.texunits)):
fd.write("\t\tstatic TexUnitPair _texunit_pairs[]={\n")
for x in header_data.texunits:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic TexUnitPair *_texunit_pairs=NULL;\n")
if (len(header_data.ubos)):
fd.write("\t\tstatic UBOPair _ubo_pairs[]={\n")
for x in header_data.ubos:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic UBOPair *_ubo_pairs=NULL;\n")
fd.write("\t\tstatic const char _vertex_code[]={\n")
for x in header_data.vertex_lines:
for i in range(len(x)):
fd.write(str(ord(x[i])) + ",")
fd.write(str(ord('\n')) + ",")
fd.write("\t\t0};\n\n")
fd.write("\t\tstatic const int _vertex_code_start=" + str(header_data.vertex_offset) + ";\n")
fd.write("\t\tstatic const char _fragment_code[]={\n")
for x in header_data.fragment_lines:
for i in range(len(x)):
fd.write(str(ord(x[i])) + ",")
fd.write(str(ord('\n')) + ",")
fd.write("\t\t0};\n\n")
fd.write("\t\tstatic const int _fragment_code_start=" + str(header_data.fragment_offset) + ";\n")
if output_attribs:
fd.write("\t\tsetup(_conditional_strings," + str(len(header_data.conditionals)) + ",_uniform_strings," + str(len(header_data.uniforms)) + ",_attribute_pairs," + str(len(header_data.attributes)) + ", _texunit_pairs," + str(len(header_data.texunits)) + ",_ubo_pairs," + str(len(header_data.ubos)) + ",_feedbacks," + str(feedback_count) + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
else:
fd.write("\t\tsetup(_conditional_strings," + str(len(header_data.conditionals)) + ",_uniform_strings," + str(len(header_data.uniforms)) + ",_texunit_pairs," + str(len(header_data.texunits)) + ",_enums," + str(len(header_data.enums)) + ",_enum_values," + str(enum_value_count) + ",_ubo_pairs," + str(len(header_data.ubos)) + ",_feedbacks," + str(feedback_count) + ",_vertex_code,_fragment_code,_vertex_code_start,_fragment_code_start);\n")
fd.write("\t};\n\n")
if (len(enum_constants)):
fd.write("\tenum EnumConditionals {\n")
for x in enum_constants:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
fd.write("\tvoid set_enum_conditional(EnumConditionals p_cond) { _set_enum_conditional(p_cond); }\n")
fd.write("};\n\n")
fd.write("#endif\n\n")
fd.close()
def build_legacygl_headers(target, source, env):
for x in source:
build_legacygl_header(str(x), include="drivers/legacygl/shader_lgl.h", class_suffix="LGL", output_attribs=False)
return 0
def build_gles2_headers(target, source, env):
for x in source:
build_legacygl_header(str(x), include="drivers/gles2/shader_gles2.h", class_suffix="GLES2", output_attribs=True)
def build_gles3_headers(target, source, env):
for x in source:
build_legacygl_header(str(x), include="drivers/gles3/shader_gles3.h", class_suffix="GLES3", output_attribs=True)
def update_version():
rev = "custom_build"
if (os.getenv("BUILD_REVISION") != None):
rev = os.getenv("BUILD_REVISION")
print("Using custom revision: " + rev)
import version
f = open("core/version_generated.h", "wb")
f.write("#define VERSION_SHORT_NAME " + str(version.short_name) + "\n")
f.write("#define VERSION_NAME " + str(version.name) + "\n")
f.write("#define VERSION_MAJOR " + str(version.major) + "\n")
f.write("#define VERSION_MINOR " + str(version.minor) + "\n")
if (hasattr(version, 'patch')):
f.write("#define VERSION_PATCH " + str(version.patch) + "\n")
f.write("#define VERSION_REVISION " + str(rev) + "\n")
f.write("#define VERSION_STATUS " + str(version.status) + "\n")
import datetime
f.write("#define VERSION_YEAR " + str(datetime.datetime.now().year) + "\n")
def parse_cg_file(fname, uniforms, sizes, conditionals):
import re
fs = open(fname, "r")
line = fs.readline()
while line:
if re.match(r"^\s*uniform", line):
res = re.match(r"uniform ([\d\w]*) ([\d\w]*)")
type = res.groups(1)
name = res.groups(2)
uniforms.append(name)
if (type.find("texobj") != -1):
sizes.append(1)
else:
t = re.match(r"float(\d)x(\d)", type)
if t:
sizes.append(int(t.group(1)) * int(t.group(2)))
else:
t = re.match(r"float(\d)", type)
sizes.append(int(t.group(1)))
if line.find("[branch]") != -1:
conditionals.append(name)
line = fs.readline()
def build_cg_shader(sname):
vp_uniforms = []
vp_uniform_sizes = []
vp_conditionals = []
parse_cg_file("vp_" + sname + ".cg", vp_uniforms, vp_uniform_sizes, vp_conditionals)
fp_uniforms = []
fp_uniform_sizes = []
fp_conditionals = []
parse_cg_file("fp_" + sname + ".cg", fp_uniforms, fp_uniform_sizes, fp_conditionals)
fd = open("shader_" + sname + ".cg.h", "w")
fd.write('\n#include "shader_cell.h"\n')
fd.write("\nclass Shader_" + sname + " : public ShaderCell {\n")
fd.write("\n\tstatic struct VertexUniforms[] = {\n")
offset = 0
for i in range(0, len(vp_uniforms)):
fd.write('\t\t{ "%s", %d, %d },\n' % (vp_uniforms[i], offset, vp_uniform_sizes[i]))
offset = offset + vp_uniform_sizes[i]
fd.write("\t};\n\n")
fd.write("public:\n\n")
fd.write("\tenum {\n")
for i in range(0, len(vp_uniforms)):
fd.write('\t\tVP_%s,\n' % vp_uniforms[i].upper())
fd.write("\t};\n")
import glob
def detect_modules():
module_list = []
includes_cpp = ""
register_cpp = ""
unregister_cpp = ""
files = glob.glob("modules/*")
files.sort() # so register_module_types does not change that often, and also so plugins are registered in alphabetical order
for x in files:
if (not os.path.isdir(x)):
continue
x = x.replace("modules/", "") # rest of world
x = x.replace("modules\\", "") # win32
module_list.append(x)
try:
with open("modules/" + x + "/register_types.h"):
includes_cpp += '#include "modules/' + x + '/register_types.h"\n'
register_cpp += '#ifdef MODULE_' + x.upper() + '_ENABLED\n'
register_cpp += '\tregister_' + x + '_types();\n'
register_cpp += '#endif\n'
unregister_cpp += '#ifdef MODULE_' + x.upper() + '_ENABLED\n'
unregister_cpp += '\tunregister_' + x + '_types();\n'
unregister_cpp += '#endif\n'
except IOError:
pass
modules_cpp = """
// modules.cpp - THIS FILE IS GENERATED, DO NOT EDIT!!!!!!!
#include "register_module_types.h"
""" + includes_cpp + """
void register_module_types() {
""" + register_cpp + """
}
void unregister_module_types() {
""" + unregister_cpp + """
}
"""
f = open("modules/register_module_types.cpp", "wb")
f.write(modules_cpp)
return module_list
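# For a hypothetical module "gdnative", the generated register_module_types.cpp
# gains '#include "modules/gdnative/register_types.h"' plus register_gdnative_types()
# and unregister_gdnative_types() calls guarded by #ifdef MODULE_GDNATIVE_ENABLED.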
def win32_spawn(sh, escape, cmd, args, env):
import subprocess
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
startupinfo = subprocess.STARTUPINFO()
#startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
for e in env:
if type(env[e]) != type(""):
env[e] = str(env[e])
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
"""
def win32_spawn(sh, escape, cmd, args, spawnenv):
import win32file
import win32event
import win32process
import win32security
for var in spawnenv:
spawnenv[var] = spawnenv[var].encode('ascii', 'replace')
sAttrs = win32security.SECURITY_ATTRIBUTES()
StartupInfo = win32process.STARTUPINFO()
newargs = ' '.join(map(escape, args[1:]))
cmdline = cmd + " " + newargs
# check for any special operating system commands
if cmd == 'del':
for arg in args[1:]:
win32file.DeleteFile(arg)
exit_code = 0
else:
# otherwise execute the command.
hProcess, hThread, dwPid, dwTid = win32process.CreateProcess(None, cmdline, None, None, 1, 0, spawnenv, None, StartupInfo)
win32event.WaitForSingleObject(hProcess, win32event.INFINITE)
exit_code = win32process.GetExitCodeProcess(hProcess)
win32file.CloseHandle(hProcess);
win32file.CloseHandle(hThread);
return exit_code
"""
def android_add_maven_repository(self, url):
if (url not in self.android_maven_repos):
self.android_maven_repos.append(url)
def android_add_dependency(self, depline):
if (depline not in self.android_dependencies):
self.android_dependencies.append(depline)
def android_add_java_dir(self, subpath):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath
if (base_path not in self.android_java_dirs):
self.android_java_dirs.append(base_path)
def android_add_res_dir(self, subpath):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath
if (base_path not in self.android_res_dirs):
self.android_res_dirs.append(base_path)
def android_add_aidl_dir(self, subpath):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath
if (base_path not in self.android_aidl_dirs):
self.android_aidl_dirs.append(base_path)
def android_add_jni_dir(self, subpath):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + subpath
if (base_path not in self.android_jni_dirs):
self.android_jni_dirs.append(base_path)
def android_add_gradle_plugin(self, plugin):
if (plugin not in self.android_gradle_plugins):
self.android_gradle_plugins.append(plugin)
def android_add_gradle_classpath(self, classpath):
if (classpath not in self.android_gradle_classpath):
self.android_gradle_classpath.append(classpath)
def android_add_default_config(self, config):
if (config not in self.android_default_config):
self.android_default_config.append(config)
def android_add_to_manifest(self, file):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + file
f = open(base_path, "rb")
self.android_manifest_chunk += f.read()
def android_add_to_permissions(self, file):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + file
f = open(base_path, "rb")
self.android_permission_chunk += f.read()
def android_add_to_attributes(self, file):
base_path = self.Dir(".").abspath + "/modules/" + self.current_module + "/" + file
f = open(base_path, "rb")
self.android_appattributes_chunk += f.read()
def disable_module(self):
self.disabled_modules.append(self.current_module)
def use_windows_spawn_fix(self, platform=None):
if (os.name != "nt"):
return # not needed, only for windows
    # On Windows, due to the limited command line length, when creating a static library
    # from a very high number of objects SCons will invoke "ar" once per object file;
    # that makes object files with the same name overwrite each other, so the last
    # one wins and the library loses symbols defined by the overwritten objects.
    # By enabling quick append instead of the default (replace) mode, libraries
    # get built correctly regardless of the invocation strategy.
# Furthermore, since SCons will rebuild the library from scratch when an object file
# changes, no multiple versions of the same object file will be present.
self.Replace(ARFLAGS='q')
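    # Illustrative sketch (not from the original file): with ARFLAGS='q' the
    # repeated invocations behave like
    #   ar q libfoo.a a.o
    #   ar q libfoo.a b.o
    # i.e. each call appends its objects, whereas in the default 'r' (replace)
    # mode a later member with the same name would overwrite an earlier one.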
import subprocess
def mySubProcess(cmdline, env):
prefix = ""
if(platform == 'javascript'):
prefix = "python.exe "
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(prefix + cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv = 0
env = {str(key): str(value) for key, value in env.iteritems()}
if len(cmdline) > 32000 and cmd.endswith("ar"):
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3, len(args)):
rv = mySubProcess(cmdline + args[i], env)
if rv:
break
else:
rv = mySubProcess(cmdline, env)
return rv
self['SPAWN'] = mySpawn
def split_lib(self, libname):
import string
env = self
num = 0
cur_base = ""
max_src = 64
list = []
lib_list = []
for f in getattr(env, libname + "_sources"):
fname = ""
if type(f) == type(""):
fname = env.File(f).path
else:
fname = env.File(f)[0].path
fname = fname.replace("\\", "/")
base = string.join(fname.split("/")[:2], "/")
if base != cur_base and len(list) > max_src:
if num > 0:
lib = env.Library(libname + str(num), list)
lib_list.append(lib)
list = []
num = num + 1
cur_base = base
list.append(f)
lib = env.Library(libname + str(num), list)
lib_list.append(lib)
if len(lib_list) > 0:
import os, sys
if os.name == 'posix' and sys.platform == 'msys':
env.Replace(ARFLAGS=['rcsT'])
lib = env.Library(libname + "_collated", lib_list)
lib_list = [lib]
lib_base = []
env.add_source_files(lib_base, "*.cpp")
lib_list.insert(0, env.Library(libname, lib_base))
env.Prepend(LIBS=lib_list)
def save_active_platforms(apnames, ap):
for x in ap:
pth = x + "/logo.png"
# print("open path: "+pth)
pngf = open(pth, "rb")
b = pngf.read(1)
str = " /* AUTOGENERATED FILE, DO NOT EDIT */ \n"
str += " static const unsigned char _" + x[9:] + "_logo[]={"
while(len(b) == 1):
str += hex(ord(b))
b = pngf.read(1)
if (len(b) == 1):
str += ","
str += "};\n"
wf = x + "/logo.h"
logow = open(wf, "wb")
logow.write(str)
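        # The generated logo.h then holds a byte array along these lines
        # (illustrative; the platform name and the PNG magic bytes shown here
        # are just an example):
        #    /* AUTOGENERATED FILE, DO NOT EDIT */
        #    static const unsigned char _x11_logo[]={0x89,0x50,0x4e,0x47,...};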
def no_verbose(sys, env):
# If the output is not a terminal, do nothing
if not sys.stdout.isatty():
return
colors = {}
colors['cyan'] = '\033[96m'
colors['purple'] = '\033[95m'
colors['blue'] = '\033[94m'
colors['green'] = '\033[92m'
colors['yellow'] = '\033[93m'
colors['red'] = '\033[91m'
colors['end'] = '\033[0m'
compile_source_message = '%sCompiling %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end'])
java_compile_source_message = '%sCompiling %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end'])
compile_shared_source_message = '%sCompiling shared %s==> %s$SOURCE%s' % (colors['blue'], colors['purple'], colors['yellow'], colors['end'])
link_program_message = '%sLinking Program %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
link_library_message = '%sLinking Static Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
ranlib_library_message = '%sRanlib Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
link_shared_library_message = '%sLinking Shared Library %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
java_library_message = '%sCreating Java Archive %s==> %s$TARGET%s' % (colors['red'], colors['purple'], colors['yellow'], colors['end'])
env.Append(CXXCOMSTR=[compile_source_message])
env.Append(CCCOMSTR=[compile_source_message])
env.Append(SHCCCOMSTR=[compile_shared_source_message])
env.Append(SHCXXCOMSTR=[compile_shared_source_message])
env.Append(ARCOMSTR=[link_library_message])
env.Append(RANLIBCOMSTR=[ranlib_library_message])
env.Append(SHLINKCOMSTR=[link_shared_library_message])
env.Append(LINKCOMSTR=[link_program_message])
env.Append(JARCOMSTR=[java_library_message])
env.Append(JAVACCOMSTR=[java_compile_source_message])
def detect_visual_c_compiler_version(tools_env):
    # tools_env is the environment SCons uses to run the tools that execute tasks,
    # i.e. SCons's env['ENV'] (see the SCons documentation for details).
    # To keep this function well encapsulated it deliberately receives SCons's
    # tools environment (env['ENV']) and not the SCons setup environment (env),
    # so make sure you pass the right environment or it will fail to detect
    # the proper Visual C++ version that will be called.
    # There is no flag for the Visual C++ compilers that sets the architecture,
    # i.e. SCons's bits argument (32, 64, ARM, etc.). There are many different
    # cl.exe binaries, and each one compiles and links for a different architecture.
    # As far as I know, the only way to figure out which compiler will run when
    # SCons calls cl.exe via Program() is to check the PATH variable and determine
    # which one comes first. The code below does that and returns
# the following string values:
# "" Compiler not detected
# "amd64" Native 64 bit compiler
# "amd64_x86" 64 bit Cross Compiler for 32 bit
# "x86" Native 32 bit compiler
# "x86_amd64" 32 bit Cross Compiler for 64 bit
# There are other architectures, but Godot does not support them currently, so this function does not detect arm/amd64_arm
# and similar architectures/compilers
# Set chosen compiler to "not detected"
vc_chosen_compiler_index = -1
vc_chosen_compiler_str = ""
# Start with Pre VS 2017 checks which uses VCINSTALLDIR:
if 'VCINSTALLDIR' in tools_env:
# print "Checking VCINSTALLDIR"
        # find() returns -1 when nothing matches, so the big ifs below are needed; it is the simplest solution, in fact
# First test if amd64 and amd64_x86 compilers are present in the path
vc_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64;")
if(vc_amd64_compiler_detection_index > -1):
vc_chosen_compiler_index = vc_amd64_compiler_detection_index
vc_chosen_compiler_str = "amd64"
vc_amd64_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64_x86;")
if(vc_amd64_x86_compiler_detection_index > -1
and (vc_chosen_compiler_index == -1
or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index)):
vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index
vc_chosen_compiler_str = "amd64_x86"
# Now check the 32 bit compilers
vc_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN;")
if(vc_x86_compiler_detection_index > -1
and (vc_chosen_compiler_index == -1
or vc_chosen_compiler_index > vc_x86_compiler_detection_index)):
vc_chosen_compiler_index = vc_x86_compiler_detection_index
vc_chosen_compiler_str = "x86"
vc_x86_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env['VCINSTALLDIR'] + "BIN\\x86_amd64;")
if(vc_x86_amd64_compiler_detection_index > -1
and (vc_chosen_compiler_index == -1
or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index)):
vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index
vc_chosen_compiler_str = "x86_amd64"
# and for VS 2017 and newer we check VCTOOLSINSTALLDIR:
if 'VCTOOLSINSTALLDIR' in tools_env:
# print "Checking VCTOOLSINSTALLDIR"
# Newer versions have a different path available
vc_amd64_compiler_detection_index = tools_env["PATH"].upper().find(tools_env['VCTOOLSINSTALLDIR'].upper() + "BIN\\HOSTX64\\X64;")
if(vc_amd64_compiler_detection_index > -1):
vc_chosen_compiler_index = vc_amd64_compiler_detection_index
vc_chosen_compiler_str = "amd64"
vc_amd64_x86_compiler_detection_index = tools_env["PATH"].upper().find(tools_env['VCTOOLSINSTALLDIR'].upper() + "BIN\\HOSTX64\\X86;")
if(vc_amd64_x86_compiler_detection_index > -1
and (vc_chosen_compiler_index == -1
or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index)):
vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index
vc_chosen_compiler_str = "amd64_x86"
vc_x86_compiler_detection_index = tools_env["PATH"].upper().find(tools_env['VCTOOLSINSTALLDIR'].upper() + "BIN\\HOSTX86\\X86;")
if(vc_x86_compiler_detection_index > -1
and (vc_chosen_compiler_index == -1
or vc_chosen_compiler_index > vc_x86_compiler_detection_index)):
vc_chosen_compiler_index = vc_x86_compiler_detection_index
vc_chosen_compiler_str = "x86"
vc_x86_amd64_compiler_detection_index = tools_env["PATH"].upper().find(tools_env['VCTOOLSINSTALLDIR'].upper() + "BIN\\HOSTX86\\X64;")
if(vc_x86_amd64_compiler_detection_index > -1
and (vc_chosen_compiler_index == -1
or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index)):
vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index
vc_chosen_compiler_str = "x86_amd64"
# debug help
# print vc_amd64_compiler_detection_index
# print vc_amd64_x86_compiler_detection_index
# print vc_x86_compiler_detection_index
# print vc_x86_amd64_compiler_detection_index
# print "chosen "+str(vc_chosen_compiler_index)+ " | "+str(vc_chosen_compiler_str)
return vc_chosen_compiler_str
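# Illustrative call (sketch): pass SCons's tools environment, not the setup
# environment, e.g.
#   compiler = detect_visual_c_compiler_version(env['ENV'])
# which returns "", "amd64", "amd64_x86", "x86" or "x86_amd64".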
def precious_program(env, program, sources, **args):
program = env.ProgramOriginal(program, sources, **args)
env.Precious(program)
return program
|
Balachan27/django
|
refs/heads/master
|
django/contrib/gis/db/models/aggregates.py
|
414
|
from django.contrib.gis.db.models.fields import ExtentField
from django.db.models.aggregates import Aggregate
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
class GeoAggregate(Aggregate):
function = None
is_extent = False
def as_sql(self, compiler, connection):
        # This will be called again in the parent's as_sql, but it is needed
        # now, before we look up the spatial_aggregate_name.
connection.ops.check_expression_support(self)
self.function = connection.ops.spatial_aggregate_name(self.name)
return super(GeoAggregate, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if not hasattr(self, 'tolerance'):
self.tolerance = 0.05
self.extra['tolerance'] = self.tolerance
if not self.is_extent:
self.template = '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
return self.as_sql(compiler, connection)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(GeoAggregate, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
for expr in c.get_source_expressions():
if not hasattr(expr.field, 'geom_type'):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
return c
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_geom(value, self.output_field)
class Collect(GeoAggregate):
name = 'Collect'
class Extent(GeoAggregate):
name = 'Extent'
is_extent = '2D'
def __init__(self, expression, **extra):
super(Extent, self).__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent(value, context.get('transformed_srid'))
class Extent3D(GeoAggregate):
name = 'Extent3D'
is_extent = '3D'
def __init__(self, expression, **extra):
super(Extent3D, self).__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent3d(value, context.get('transformed_srid'))
class MakeLine(GeoAggregate):
name = 'MakeLine'
class Union(GeoAggregate):
name = 'Union'
|
qsnake/gpaw
|
refs/heads/master
|
oldtest/spline.py
|
3
|
from gpaw.spline import Spline
import numpy as np
a = np.array([1, 0.9, 0.1, 0.0])
s = Spline(0, 2.0, a)
dx = 0.0001
for x in [0.5, 1, 1.2, 3]:
y, dydx = s.get_value_and_derivative(x)
z = (s(x + dx) - s(x - dx)) / (2 * dx)
print y, dydx - z
assert abs(dydx - z) < 1e-7
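# The central difference (s(x + dx) - s(x - dx)) / (2 * dx) approximates the
# derivative with an O(dx**2) error, so for dx = 1e-4 agreement with the
# analytic dydx to within 1e-7 is a reasonable expectation.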
|
celebritycoin/CelebrityCoin
|
refs/heads/master
|
share/qt/extract_strings_qt.py
|
2945
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
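# Illustrative behaviour (sketch): for a .po fragment such as
#   msgid "Options:"
#   msgstr ""
# parse_po returns [(['"Options:"'], ['""'])] -- both sides keep their
# surrounding quotes, ready to be joined back into C string literals.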
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
DonBeo/scikit-learn
|
refs/heads/master
|
sklearn/cluster/affinity_propagation_.py
|
21
|
""" Algorithms for clustering : Meanshift, Affinity propagation and spectral
clustering.
"""
# Author: Alexandre Gramfort alexandre.gramfort@inria.fr
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
        number of clusters). For a smaller number of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
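# Illustrative usage (sketch, not part of the original module):
#   S = -euclidean_distances(X, squared=True)
#   centers, labels = affinity_propagation(S, preference=np.median(S))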
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
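# Illustrative usage (sketch):
#   ap = AffinityPropagation(damping=0.9).fit(X)
#   new_labels = ap.predict(X_new)  # unavailable when affinity='precomputed'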
|
srware/upm
|
refs/heads/master
|
examples/python/grovecollision.py
|
16
|
#!/usr/bin/python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_grovecollision as upmGrovecollision
# This was tested with the Grove Collision Sensor
# Instantiate a Grove Collision on digital pin D2
myGrovecollision = upmGrovecollision.GroveCollision(2)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from myGrovecollision
def exitHandler():
print "Exiting"
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
collisionState = False
print "No collision"
while(1):
if (myGrovecollision.isColliding() and not collisionState):
print "Collision!"
collisionState = True
elif (not myGrovecollision.isColliding() and collisionState):
print "No collision"
collisionState = False
|
kdebrab/pandas
|
refs/heads/master
|
pandas/tests/io/test_clipboard.py
|
2
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
from textwrap import dedent
import pytest
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.compat import PY2
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set, clipboard_get
try:
DataFrame({'A': [1, 2]}).to_clipboard()
_DEPS_INSTALLED = 1
except (PyperclipException, RuntimeError):
_DEPS_INSTALLED = 0
def build_kwargs(sep, excel):
kwargs = {}
if excel != 'default':
kwargs['excel'] = excel
if sep != 'default':
kwargs['sep'] = sep
return kwargs
@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii',
'colwidth', 'mixed', 'float', 'int'])
def df(request):
data_type = request.param
if data_type == 'delims':
return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'],
'b': ['hi\'j', 'k\'\'lm']})
elif data_type == 'utf8':
return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
elif data_type == 'string':
return mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'long':
max_rows = get_option('display.max_rows')
return mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'nonascii':
return pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
elif data_type == 'colwidth':
_cw = get_option('display.max_colwidth') + 1
return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'mixed':
return DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
elif data_type == 'float':
return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
elif data_type == 'int':
return mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
else:
raise ValueError
@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
class TestClipboard(object):
def check_round_trip_frame(self, data, excel=None, sep=None,
encoding=None):
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
result = read_clipboard(sep=sep or '\t', index_col=0,
encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
# Test that default arguments copy as tab delimited
def test_round_trip_frame(self, df):
self.check_round_trip_frame(df)
# Test that explicit delimiters are respected
@pytest.mark.parametrize('sep', ['\t', ',', '|'])
def test_round_trip_frame_sep(self, df, sep):
self.check_round_trip_frame(df, sep=sep)
# Test white space separator
def test_round_trip_frame_string(self, df):
df.to_clipboard(excel=False, sep=None)
result = read_clipboard()
assert df.to_string() == result.to_string()
assert df.shape == result.shape
# Two character separator is not supported in to_clipboard
# Test that multi-character separators are not silently passed
def test_excel_sep_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=True, sep=r'\t')
# Separator is ignored when excel=False and should produce a warning
def test_copy_delim_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=False, sep='\t')
# Tests that the default behavior of to_clipboard is tab
# delimited and excel="True"
@pytest.mark.parametrize('sep', ['\t', None, 'default'])
@pytest.mark.parametrize('excel', [True, None, 'default'])
def test_clipboard_copy_tabs_default(self, sep, excel, df):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
if PY2:
# to_clipboard copies unicode, to_csv produces bytes. This is
# expected behavior
assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t')
else:
assert clipboard_get() == df.to_csv(sep='\t')
# Tests reading of white space separated tables
@pytest.mark.parametrize('sep', [None, 'default'])
@pytest.mark.parametrize('excel', [False])
def test_clipboard_copy_strings(self, sep, excel, df):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
result = read_clipboard(sep=r'\s+')
assert result.to_string() == df.to_string()
assert df.shape == result.shape
def test_read_clipboard_infer_excel(self):
# gh-19010: avoid warnings
clip_kwargs = dict(engine="python")
text = dedent("""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1][1] == 'Harry Carney'
        # having a different tab count per line doesn't trigger the Excel path
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard(**clip_kwargs)
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard(**clip_kwargs)
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self, df):
# test case for testing invalid encoding
with pytest.raises(ValueError):
df.to_clipboard(encoding='ascii')
with pytest.raises(NotImplementedError):
pd.read_clipboard(encoding='ascii')
@pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8'])
def test_round_trip_valid_encodings(self, enc, df):
self.check_round_trip_frame(df, encoding=enc)
|
uucidl/hammer
|
refs/heads/master
|
src/bindings/cpp/vendor/gtest-1.7.0/test/gtest_shuffle_test.py
|
3023
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
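# Illustrative behaviour (sketch):
#   GetTestCases(['A.x', 'A.y', 'B.z', 'A.w']) returns ['A', 'B'] -- each test
#   case is kept once, in order of first appearance.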
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
|
BrotherPhil/django
|
refs/heads/master
|
django/contrib/gis/gdal/__init__.py
|
327
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import ( # NOQA
GDALException, OGRException, OGRIndexError, SRSException, check_err,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDALRaster', 'GDAL_VERSION', 'SpatialReference', 'CoordTransform',
'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
|
gerardosteel/Proyectofinal
|
refs/heads/master
|
proyectofinal/vendor/psy/psysh/test/tools/vis.py
|
710
|
"""
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
    `VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\'
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, c_char_p, c_int
from ctypes.util import find_library
__all__ = [
'vis', 'unvis',
'VIS_OCTAL', 'VIS_CSTYLE',
'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
_libbsd = CDLL(find_library('bsd'))
_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int
_strunvis = _libbsd.strunvis
_strunvis.argtypes = [c_char_p, c_char_p]
_strunvis.restype = c_int
def vis(src, flags=VIS_WHITE):
"""
Encodes the string `src` into libbsd's vis encoding.
`flags` must be one of the VIS_* constants
C definition:
int strvis(char *dst, char *src, int flags);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src) * 4))
src_p = c_char_p(src)
flags = c_int(flags)
bytes_written = _strvis(dst_p, src_p, flags)
if -1 == bytes_written:
raise RuntimeError('vis failed to encode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
def unvis(src):
"""
Decodes a string encoded by vis.
C definition:
int strunvis(char *dst, char *src);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src)))
src_p = c_char_p(src)
bytes_written = _strunvis(dst_p, src_p)
if -1 == bytes_written:
raise RuntimeError('unvis failed to decode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
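# Illustrative round trip (sketch; the exact escape depends on the flags):
#   encoded = vis(u'a\tb', VIS_WHITE | VIS_CSTYLE)  # tab -> a C-style escape
#   assert unvis(encoded) == u'a\tb'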
|
kun--hust/sccloud
|
refs/heads/master
|
swift/account/utils.py
|
17
|
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from xml.sax import saxutils
from swift.common.swob import HTTPOk, HTTPNoContent
from swift.common.utils import json, Timestamp
from swift.common.storage_policy import POLICIES
class FakeAccountBroker(object):
"""
Quacks like an account broker, but doesn't actually do anything. Responds
like an account broker would for a real, empty account with no metadata.
"""
def get_info(self):
now = Timestamp(time.time()).internal
return {'container_count': 0,
'object_count': 0,
'bytes_used': 0,
'created_at': now,
'put_timestamp': now}
def list_containers_iter(self, *_, **__):
return []
@property
def metadata(self):
return {}
def get_policy_stats(self):
return {}
def get_response_headers(broker):
info = broker.get_info()
resp_headers = {
'X-Account-Container-Count': info['container_count'],
'X-Account-Object-Count': info['object_count'],
'X-Account-Bytes-Used': info['bytes_used'],
'X-Timestamp': Timestamp(info['created_at']).normal,
'X-PUT-Timestamp': Timestamp(info['put_timestamp']).normal}
policy_stats = broker.get_policy_stats()
for policy_idx, stats in policy_stats.items():
policy = POLICIES.get_by_index(policy_idx)
if not policy:
continue
header_prefix = 'X-Account-Storage-Policy-%s-%%s' % policy.name
for key, value in stats.items():
header_name = header_prefix % key.replace('_', '-')
resp_headers[header_name] = value
resp_headers.update((key, value)
for key, (value, timestamp) in
broker.metadata.iteritems() if value != '')
return resp_headers
def account_listing_response(account, req, response_content_type, broker=None,
limit='', marker='', end_marker='', prefix='',
delimiter=''):
if broker is None:
broker = FakeAccountBroker()
resp_headers = get_response_headers(broker)
account_list = broker.list_containers_iter(limit, marker, end_marker,
prefix, delimiter)
if response_content_type == 'application/json':
data = []
for (name, object_count, bytes_used, is_subdir) in account_list:
if is_subdir:
data.append({'subdir': name})
else:
data.append({'name': name, 'count': object_count,
'bytes': bytes_used})
account_list = json.dumps(data)
elif response_content_type.endswith('/xml'):
output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
'<account name=%s>' % saxutils.quoteattr(account)]
for (name, object_count, bytes_used, is_subdir) in account_list:
if is_subdir:
output_list.append(
'<subdir name=%s />' % saxutils.quoteattr(name))
else:
item = '<container><name>%s</name><count>%s</count>' \
'<bytes>%s</bytes></container>' % \
(saxutils.escape(name), object_count, bytes_used)
output_list.append(item)
output_list.append('</account>')
account_list = '\n'.join(output_list)
else:
if not account_list:
resp = HTTPNoContent(request=req, headers=resp_headers)
resp.content_type = response_content_type
resp.charset = 'utf-8'
return resp
account_list = '\n'.join(r[0] for r in account_list) + '\n'
ret = HTTPOk(body=account_list, request=req, headers=resp_headers)
ret.content_type = response_content_type
ret.charset = 'utf-8'
return ret
|
Dapid/scipy
|
refs/heads/master
|
scipy/io/harwell_boeing/__init__.py
|
155
|
from __future__ import division, print_function, absolute_import
from scipy.io.harwell_boeing.hb import MalformedHeader, HBInfo, HBFile, \
HBMatrixType, hb_read, hb_write
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/plugin.video.neptune-1.2.2/resources/lib/modules/dom_parser.py
|
35
|
"""
Based on Parsedom for XBMC plugins
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from collections import namedtuple
DomMatch = namedtuple('DOMMatch', ['attrs', 'content'])
re_type = type(re.compile(''))
def __get_dom_content(html, name, match):
if match.endswith('/>'): return ''
# override tag name with tag from match if possible
tag = re.match('<([^\s/>]+)', match)
if tag: name = tag.group(1)
start_str = '<%s' % name
end_str = "</%s" % name
# start/end tags without matching case cause issues
start = html.find(match)
end = html.find(end_str, start)
pos = html.find(start_str, start + 1)
    while pos < end and pos != -1:  # the end tag found so far may close a nested inner tag; keep scanning
tend = html.find(end_str, end + len(end_str))
if tend != -1:
end = tend
pos = html.find(start_str, pos + 1)
if start == -1 and end == -1:
result = ''
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
else:
result = ''
return result
def __get_dom_elements(item, name, attrs):
if not attrs:
pattern = '(<%s(?:\s[^>]*>|/?>))' % name
this_list = re.findall(pattern, item, re.M | re.S | re.I)
else:
last_list = None
for key, value in attrs.iteritems():
value_is_regex = isinstance(value, re_type)
value_is_str = isinstance(value, basestring)
pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key)
re_list = re.findall(pattern, item, re.M | re.S | re.I)
if value_is_regex:
this_list = [r[0] for r in re_list if re.match(value, r[2])]
else:
temp_value = [value] if value_is_str else value
this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))]
if not this_list:
has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value)
if not has_space:
pattern = '''(<{tag}[^>]*\s{key}=((?:[^\s>]|/>)*)[^>]*>)'''.format(tag=name, key=key)
re_list = re.findall(pattern, item, re.M | re.S | re.I)
if value_is_regex:
this_list = [r[0] for r in re_list if re.match(value, r[1])]
else:
this_list = [r[0] for r in re_list if value == r[1]]
if last_list is None:
last_list = this_list
else:
last_list = [item for item in this_list if item in last_list]
this_list = last_list
return this_list
def __get_attribs(element):
attribs = {}
for match in re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element):
match = match.groupdict()
value1 = match.get('value1')
value2 = match.get('value2')
value = value1 if value1 is not None else value2
if value is None: continue
attribs[match['key'].lower().strip()] = value
return attribs
def parse_dom(html, name='', attrs=None, req=False, exclude_comments=False):
if attrs is None: attrs = {}
name = name.strip()
if isinstance(html, unicode) or isinstance(html, DomMatch):
html = [html]
elif isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
try:
html = [html.decode("utf-8", "replace")]
except:
html = [html]
elif not isinstance(html, list):
return ''
if not name:
return ''
if not isinstance(attrs, dict):
return ''
if req:
if not isinstance(req, list):
req = [req]
req = set([key.lower() for key in req])
all_results = []
for item in html:
if isinstance(item, DomMatch):
item = item.content
if exclude_comments:
item = re.sub(re.compile('<!--.*?-->', re.DOTALL), '', item)
results = []
for element in __get_dom_elements(item, name, attrs):
attribs = __get_attribs(element)
if req and not req <= set(attribs.keys()): continue
temp = __get_dom_content(item, name, element).strip()
results.append(DomMatch(attribs, temp))
item = item[item.find(temp, item.find(element)):]
all_results += results
return all_results
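# Illustrative usage (sketch; tag and attribute values invented):
#   links = parse_dom(html, 'a', attrs={'class': 'stream'}, req='href')
# each element is a DomMatch(attrs={...}, content='...inner html...').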
|
tovrstra/horton
|
refs/heads/master
|
tools/qa/install_deps_extra.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Installs extra dependencies.
Run all the install scripts in the proper order. After each install, the
corresponding environment vars are sourced to make sure the following dependencies
can be built properly.
"""
import json
import os
def main():
"""Main program."""
# Load the dependencies data
with open('dependencies.json') as f:
dependencies = json.load(f)
# Install each with an install_command line.
for d in dependencies:
install_command = d.get('install_command')
if install_command is not None:
print install_command
os.system(install_command)
if __name__ == '__main__':
main()
|
lintzc/gpdb
|
refs/heads/master
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep/mpp18932/steps/__init__.py
|
9
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from time import sleep
import tinctest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from tinctest.lib import run_shell_command
from mpp.gpdb.tests.storage.lib import Disk
class Steps():
def __init__(self):
self.disk = Disk()
def get_segments(self):
get_seg_sql = "select distinct(hostname) from gp_segment_configuration;"
res = PSQL.run_sql_command(get_seg_sql, dbname = 'template1', flags ='-q -t')
return res.strip().split('\n')
def checkDiskUsage(self, host, disk): # This now checks for only /data
(rc, result) = self.disk.get_disk_usage(host, '/'+disk)
if rc != 0:
raise Exception ("The specified mount /data is not present for the device")
else:
available_usage = result
return available_usage
def fillDisk(self, filename, host):
cmd_prefix = "ssh " +host+ " \""
cmd_postfix = "\""
location = os.getcwd()
if not os.path.isdir('%s/diskfill/' % location):
os.makedirs('%s/diskfill/' % location)
cmd_str = cmd_prefix + "dd if=/dev/zero bs=1024K count=1000 of=" +location+ "/diskfill/" + filename +cmd_postfix
results={'rc':0, 'stdout':'', 'stderr':''}
run_shell_command(cmd_str, results=results)
if int(results['rc']) !=0:
raise Exception('disk fill not working')
def changetracking(self, type = 'mirror'):
'''Inject a fault that places the system in changetracking mode'''
tinctest.logger.info("Put system in changetracking ")
cmd_str = 'gpfaultinjector -f filerep_consumer -m async -y fault -r %s -H ALL' %type
results={'rc':0, 'stdout':'', 'stderr':''}
run_shell_command(cmd_str, results=results)
return results['stdout']
def is_dbdown(self):
'''Return True if the system is down (i.e. segments are in changetracking mode)'''
tinctest.logger.info("checking if segments are up and in sync")
cmd_str = "select count(*) from gp_segment_configuration where mode = 'c'"
res = PSQL.run_sql_command(cmd_str, dbname = 'template1', flags ='-q -t')
if int(res.strip()) > 0:
return True
return False
def checkLogMessage(self):
'''Query gp_toolkit segment log messages to see whether the concurrent test run resulted in "No space left on device"'''
log_sql = "select substring(logmessage from 0 for 60) from gp_toolkit.__gp_log_segment_ext where logmessage like '%disabled%';"
for i in range(1,25):
result = PSQL.run_sql_command(log_sql, dbname = 'template1', flags ='-q -t')
result = result.strip().split('\n')
log_chk = False
for logmsg in result:
if ('write error for change tracking full log' in logmsg):
log_chk = True
break
if log_chk == True:
break
sleep(2)
if log_chk == False:
raise Exception("Message 'changetracking disabled' not found in the logs")
return True
def remove_fillfiles(self, filename, host):
location = os.getcwd()
cmd_str = "ssh %s rm %s/diskfill/%s*" % (host,location, filename)
results={'rc':0, 'stdout':'', 'stderr':''}
run_shell_command(cmd_str, results=results)
if int(results['rc']) !=0:
raise Exception('Unable to delete the fill files')
return
def recover_segments(self):
'''call gprecoverseg -a to recover down segments'''
tinctest.logger.info("call gprecoverseg -a to recover down segments")
cmd_str = 'gprecoverseg -a'
results={'rc':0, 'stdout':'', 'stderr':''}
run_shell_command(cmd_str, results=results)
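# Illustrative driver sketch (hypothetical fill-file name; ordering follows the
# methods above):
#
#     steps = Steps()
#     for host in steps.get_segments():
#         steps.fillDisk('fill_1', host)
#     steps.changetracking('mirror')
#     steps.checkLogMessage()
#     for host in steps.get_segments():
#         steps.remove_fillfiles('fill_1', host)
#     steps.recover_segments()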
|
megaumi/django
|
refs/heads/master
|
django/utils/deprecation.py
|
199
|
from __future__ import absolute_import
import inspect
import warnings
class RemovedInDjango20Warning(PendingDeprecationWarning):
pass
class RemovedInDjango110Warning(DeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInDjango110Warning
class warn_about_renamed_method(object):
def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
self.class_name = class_name
self.old_method_name = old_method_name
self.new_method_name = new_method_name
self.deprecation_warning = deprecation_warning
def __call__(self, f):
def wrapped(*args, **kwargs):
warnings.warn(
"`%s.%s` is deprecated, use `%s` instead." %
(self.class_name, self.old_method_name, self.new_method_name),
self.deprecation_warning, 2)
return f(*args, **kwargs)
return wrapped
class RenameMethodsBase(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`." %
(class_name, old_method_name, new_method_name),
deprecation_warning, 2)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
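# Illustrative sketch of opting in (hypothetical class names; assumes
# django.utils.six for the metaclass helper):
#
#     class RenameWidgetMethods(RenameMethodsBase):
#         renamed_methods = (
#             ('old_render', 'render', RemovedInDjango110Warning),
#         )
#
#     class Widget(six.with_metaclass(RenameWidgetMethods)):
#         def render(self):
#             pass
#
# Widget().old_render() then delegates to render() and emits the warning.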
|
niktre/espressopp
|
refs/heads/master
|
contrib/mpi4py/mpi4py-2.0.0/demo/mpi-ref-v1/ex-3.07.py
|
12
|
execfile('ex-3.02.py')
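# ex-3.02.py is expected to define `dtype`, the base MPI datatype used below.
# Create_hvector builds `count` blocks of `blklen` elements each, separated by
# a byte stride of `stride`; `size` counts only the data bytes, which is what
# the assertion below checks.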
count = 2
blklen = 3
stride = 4 * dtype.extent
newtype = dtype.Create_hvector(count, blklen, stride)
assert newtype.size == dtype.size * count * blklen
dtype.Free()
newtype.Free()
|
tongchuanwei/alfred-workflow-objectid
|
refs/heads/master
|
process.py
|
2
|
__author__ = 'zephyre'
from datetime import datetime
import alfred
from bson import ObjectId
from bson.errors import InvalidId
def process(query_str):
""" Entry point """
query_str = query_str.strip().lower() if query_str else ''
results = []
if query_str == 'gen':
ret = gen_objectid_result()
results.append(ret)
oid = ret.attributes['arg']
results.extend(get_generation_time(oid))
else:
oid = parse_query_value(query_str)
if oid:
results.extend(get_generation_time(oid))
if results:
xml = alfred.xml(results) # compiles the XML answer
alfred.write(xml) # writes the XML back to Alfred
def get_generation_time(oid):
"""
Get the alfred result for the ObjectId generation time
:param oid:
:return:
"""
import pytz
from tzlocal import get_localzone
gt = oid.generation_time.replace(tzinfo=pytz.UTC)
t0 = pytz.UTC.localize(datetime.utcfromtimestamp(0))
tz_cn = pytz.timezone('Asia/Shanghai')
t1 = tz_cn.normalize(gt)
tz_local = get_localzone()
t2 = tz_local.normalize(gt)
total_seconds = int((gt - t0).total_seconds())  # seconds since the Unix epoch
return [
alfred.Item(title=str(gt), subtitle='Generation time in UTC', icon='', attributes={'arg': gt}),
alfred.Item(title=str(t1), subtitle='Generation time in Asia/Shanghai', icon='', attributes={'arg': t1}),
alfred.Item(title=str(t2), subtitle='Generation time in local timezone: %s' % tz_local.zone, icon='',
attributes={'arg': t2}),
alfred.Item(title=str(total_seconds), subtitle='Unix Epoch Timestamp', icon='',
attributes={'arg': total_seconds})
]
def gen_objectid_result():
oid = ObjectId()
return alfred.Item(title=str(oid), subtitle='Generated ObjectId', icon='', attributes={'arg': oid})
def parse_query_value(query_str):
"""
Parse the query string. If the input is invalid or doesn't exist, None will be returned
:param query_str:
:return:
"""
try:
oid = ObjectId(query_str)
except InvalidId:
oid = None
return oid
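# Illustrative flow (hypothetical 24-hex-digit id):
#
#     parse_query_value('507f1f77bcf86cd799439011')  # -> ObjectId(...)
#     parse_query_value('not-an-oid')                # -> None
#     process('gen')  # emits a fresh ObjectId plus its generation timestamps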
# def alfred_items_for_value(oid):
# index = 0
# alfred_items = [
# alfred.Item(title=str(oid), subtitle='Original ObjectId', attributes={
# 'uid': alfred.uid(index),
# 'arg': oid,
# }, icon='icon.png')
# ]
# return alfred_items
if __name__ == '__main__':
try:
arg = alfred.args()[0]
except IndexError:
arg = None
process(arg)
|
waterponey/scikit-learn
|
refs/heads/master
|
sklearn/feature_selection/base.py
|
119
|
# -*- coding: utf-8 -*-
"""Generic feature selection mixin"""
# Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from warnings import warn
import numpy as np
from scipy.sparse import issparse, csc_matrix
from ..base import TransformerMixin
from ..utils import check_array, safe_mask
from ..externals import six
class SelectorMixin(six.with_metaclass(ABCMeta, TransformerMixin)):
"""
Transformer mixin that performs feature selection given a support mask
This mixin provides a feature selector implementation with `transform` and
`inverse_transform` functionality given an implementation of
`_get_support_mask`.
"""
def get_support(self, indices=False):
"""
Get a mask, or integer index, of the features selected
Parameters
----------
indices : boolean (default False)
If True, the return value will be an array of integers, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector.
"""
mask = self._get_support_mask()
return mask if not indices else np.where(mask)[0]
@abstractmethod
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
def transform(self, X):
"""Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
X = check_array(X, accept_sparse='csr')
mask = self.get_support()
if not mask.any():
warn("No features were selected: either the data is"
" too noisy or the selection test too strict.",
UserWarning)
return np.empty(0).reshape((X.shape[0], 0))
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return X[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""
Reverse the transformation operation
Parameters
----------
X : array of shape [n_samples, n_selected_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_original_features]
`X` with columns of zeros inserted where features would have
been removed by `transform`.
"""
if issparse(X):
X = X.tocsc()
# insert additional entries in indptr:
# e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
# col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
col_nonzeros = it.ravel()
indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
Xt = csc_matrix((X.data, X.indices, indptr),
shape=(X.shape[0], len(indptr) - 1), dtype=X.dtype)
return Xt
support = self.get_support()
X = check_array(X)
if support.sum() != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
Xt[:, support] = X
return Xt
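# Illustrative subclass sketch (hypothetical selector, not part of this module):
#
#     from sklearn.base import BaseEstimator
#
#     class FirstKSelector(BaseEstimator, SelectorMixin):
#         """Keep only the first k features."""
#         def __init__(self, k=2):
#             self.k = k
#
#         def fit(self, X, y=None):
#             self.n_features_ = X.shape[1]
#             return self
#
#         def _get_support_mask(self):
#             mask = np.zeros(self.n_features_, dtype=bool)
#             mask[:self.k] = True
#             return mask
#
#     # FirstKSelector(k=1).fit(X).transform(X) keeps only the first column.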
|
maartenq/ansible
|
refs/heads/devel
|
test/runner/test.py
|
2
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Test runner for all Ansible tests."""
from __future__ import absolute_import, print_function
import errno
import os
import sys
from lib.util import (
ApplicationError,
display,
raw_command,
get_docker_completion,
generate_pip_command,
)
from lib.delegation import (
delegate,
)
from lib.executor import (
command_posix_integration,
command_network_integration,
command_windows_integration,
command_units,
command_shell,
SUPPORTED_PYTHON_VERSIONS,
ApplicationWarning,
Delegate,
generate_pip_install,
check_startup,
)
from lib.config import (
IntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
NetworkIntegrationConfig,
SanityConfig,
UnitsConfig,
ShellConfig,
)
from lib.sanity import (
command_sanity,
sanity_init,
sanity_get_tests,
)
from lib.target import (
find_target_completion,
walk_posix_integration_targets,
walk_network_integration_targets,
walk_windows_integration_targets,
walk_units_targets,
walk_sanity_targets,
)
from lib.core_ci import (
AWS_ENDPOINTS,
)
from lib.cloud import (
initialize_cloud_plugins,
)
import lib.cover
def main():
"""Main program function."""
try:
git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
os.chdir(git_root)
initialize_cloud_plugins()
sanity_init()
args = parse_args()
config = args.config(args)
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.info_stderr = (isinstance(config, SanityConfig) and config.lint) or (isinstance(config, IntegrationConfig) and config.list_targets)
check_startup()
try:
args.func(config)
except Delegate as ex:
delegate(config, ex.exclude, ex.require, ex.integration_targets)
display.review_warnings()
except ApplicationWarning as ex:
display.warning(str(ex))
exit(0)
except ApplicationError as ex:
display.error(str(ex))
exit(1)
except KeyboardInterrupt:
exit(2)
except IOError as ex:
if ex.errno == errno.EPIPE:
exit(3)
raise
def parse_args():
"""Parse command line arguments."""
try:
import argparse
except ImportError:
if '--requirements' not in sys.argv:
raise
raw_command(generate_pip_install(generate_pip_command(sys.executable), 'ansible-test'))
import argparse
try:
import argcomplete
except ImportError:
argcomplete = None
if argcomplete:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
parser = argparse.ArgumentParser(epilog=epilog)
common = argparse.ArgumentParser(add_help=False)
common.add_argument('-e', '--explain',
action='store_true',
help='explain commands that would be executed')
common.add_argument('-v', '--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output')
common.add_argument('--color',
metavar='COLOR',
nargs='?',
help='generate color output: %(choices)s',
choices=('yes', 'no', 'auto'),
const='yes',
default='auto')
common.add_argument('--debug',
action='store_true',
help='run ansible commands in debug mode')
common.add_argument('--truncate',
dest='truncate',
metavar='COLUMNS',
type=int,
default=display.columns,
help='truncate some long output (0=disabled) (default: auto)')
common.add_argument('--redact',
dest='redact',
action='store_true',
help='redact sensitive values in output')
test = argparse.ArgumentParser(add_help=False, parents=[common])
test.add_argument('include',
metavar='TARGET',
nargs='*',
help='test the specified target').completer = complete_target
test.add_argument('--exclude',
metavar='TARGET',
action='append',
help='exclude the specified target').completer = complete_target
test.add_argument('--require',
metavar='TARGET',
action='append',
help='require the specified target').completer = complete_target
test.add_argument('--coverage',
action='store_true',
help='analyze code coverage when running tests')
test.add_argument('--coverage-label',
default='',
help='label to include in coverage output file names')
test.add_argument('--metadata',
help=argparse.SUPPRESS)
add_changes(test, argparse)
add_environments(test)
integration = argparse.ArgumentParser(add_help=False, parents=[test])
integration.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
integration.add_argument('--start-at',
metavar='TARGET',
help='start at the specified target').completer = complete_target
integration.add_argument('--start-at-task',
metavar='TASK',
help='start at the specified task')
integration.add_argument('--tags',
metavar='TAGS',
help='only run plays and tasks tagged with these values')
integration.add_argument('--skip-tags',
metavar='TAGS',
help='only run plays and tasks whose tags do not match these values')
integration.add_argument('--diff',
action='store_true',
help='show diff output')
integration.add_argument('--allow-destructive',
action='store_true',
help='allow destructive tests (--local and --tox only)')
integration.add_argument('--allow-root',
action='store_true',
help='allow tests requiring root when not root')
integration.add_argument('--allow-disabled',
action='store_true',
help='allow tests which have been marked as disabled')
integration.add_argument('--allow-unstable',
action='store_true',
help='allow tests which have been marked as unstable')
integration.add_argument('--allow-unstable-changed',
action='store_true',
help='allow tests which have been marked as unstable when focused changes are detected')
integration.add_argument('--allow-unsupported',
action='store_true',
help='allow tests which have been marked as unsupported')
integration.add_argument('--retry-on-error',
action='store_true',
help='retry failed test with increased verbosity')
integration.add_argument('--continue-on-error',
action='store_true',
help='continue after failed test')
integration.add_argument('--debug-strategy',
action='store_true',
help='run test playbooks using the debug strategy')
integration.add_argument('--changed-all-target',
metavar='TARGET',
default='all',
help='target to run when all tests are needed')
integration.add_argument('--list-targets',
action='store_true',
help='list matching targets instead of running tests')
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True # work-around for python 3 bug which makes subparsers optional
posix_integration = subparsers.add_parser('integration',
parents=[integration],
help='posix integration tests')
posix_integration.set_defaults(func=command_posix_integration,
targets=walk_posix_integration_targets,
config=PosixIntegrationConfig)
add_extra_docker_options(posix_integration)
add_httptester_options(posix_integration, argparse)
network_integration = subparsers.add_parser('network-integration',
parents=[integration],
help='network integration tests')
network_integration.set_defaults(func=command_network_integration,
targets=walk_network_integration_targets,
config=NetworkIntegrationConfig)
add_extra_docker_options(network_integration, integration=False)
network_integration.add_argument('--platform',
metavar='PLATFORM',
action='append',
help='network platform/version').completer = complete_network_platform
network_integration.add_argument('--inventory',
metavar='PATH',
help='path to inventory used for tests')
network_integration.add_argument('--testcase',
metavar='TESTCASE',
help='limit a test to a specified testcase').completer = complete_network_testcase
windows_integration = subparsers.add_parser('windows-integration',
parents=[integration],
help='windows integration tests')
windows_integration.set_defaults(func=command_windows_integration,
targets=walk_windows_integration_targets,
config=WindowsIntegrationConfig)
add_extra_docker_options(windows_integration, integration=False)
windows_integration.add_argument('--windows',
metavar='VERSION',
action='append',
help='windows version').completer = complete_windows
units = subparsers.add_parser('units',
parents=[test],
help='unit tests')
units.set_defaults(func=command_units,
targets=walk_units_targets,
config=UnitsConfig)
units.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
units.add_argument('--collect-only',
action='store_true',
help='collect tests but do not execute them')
units.add_argument('--requirements-mode',
choices=('only', 'skip'),
help=argparse.SUPPRESS)
add_extra_docker_options(units, integration=False)
sanity = subparsers.add_parser('sanity',
parents=[test],
help='sanity tests')
sanity.set_defaults(func=command_sanity,
targets=walk_sanity_targets,
config=SanityConfig)
sanity.add_argument('--test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to run').completer = complete_sanity_test
sanity.add_argument('--skip-test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to skip').completer = complete_sanity_test
sanity.add_argument('--allow-disabled',
action='store_true',
help='allow tests to run which are disabled by default')
sanity.add_argument('--list-tests',
action='store_true',
help='list available tests')
sanity.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
sanity.add_argument('--base-branch',
help=argparse.SUPPRESS)
add_lint(sanity)
add_extra_docker_options(sanity, integration=False)
shell = subparsers.add_parser('shell',
parents=[common],
help='open an interactive shell')
shell.set_defaults(func=command_shell,
config=ShellConfig)
add_environments(shell, tox_version=True)
add_extra_docker_options(shell)
add_httptester_options(shell, argparse)
coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
add_environments(coverage_common, tox_version=True, tox_only=True)
coverage = subparsers.add_parser('coverage',
help='code coverage management and reporting')
coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
coverage_combine = coverage_subparsers.add_parser('combine',
parents=[coverage_common],
help='combine coverage data and rewrite remote paths')
coverage_combine.set_defaults(func=lib.cover.command_coverage_combine,
config=lib.cover.CoverageConfig)
add_extra_coverage_options(coverage_combine)
coverage_erase = coverage_subparsers.add_parser('erase',
parents=[coverage_common],
help='erase coverage data files')
coverage_erase.set_defaults(func=lib.cover.command_coverage_erase,
config=lib.cover.CoverageConfig)
coverage_report = coverage_subparsers.add_parser('report',
parents=[coverage_common],
help='generate console coverage report')
coverage_report.set_defaults(func=lib.cover.command_coverage_report,
config=lib.cover.CoverageReportConfig)
coverage_report.add_argument('--show-missing',
action='store_true',
help='show line numbers of statements not executed')
coverage_report.add_argument('--include',
metavar='PAT1,PAT2,...',
help='include only files whose paths match one of these '
'patterns. Accepts shell-style wildcards, which must be '
'quoted.')
coverage_report.add_argument('--omit',
metavar='PAT1,PAT2,...',
help='omit files whose paths match one of these patterns. '
'Accepts shell-style wildcards, which must be quoted.')
add_extra_coverage_options(coverage_report)
coverage_html = coverage_subparsers.add_parser('html',
parents=[coverage_common],
help='generate html coverage report')
coverage_html.set_defaults(func=lib.cover.command_coverage_html,
config=lib.cover.CoverageConfig)
add_extra_coverage_options(coverage_html)
coverage_xml = coverage_subparsers.add_parser('xml',
parents=[coverage_common],
help='generate xml coverage report')
coverage_xml.set_defaults(func=lib.cover.command_coverage_xml,
config=lib.cover.CoverageConfig)
add_extra_coverage_options(coverage_xml)
if argcomplete:
argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
args = parser.parse_args()
if args.explain and not args.verbosity:
args.verbosity = 1
if args.color == 'yes':
args.color = True
elif args.color == 'no':
args.color = False
else:
args.color = sys.stdout.isatty()
return args
def add_lint(parser):
"""
:type parser: argparse.ArgumentParser
"""
parser.add_argument('--lint',
action='store_true',
help='write lint output to stdout, everything else stderr')
parser.add_argument('--junit',
action='store_true',
help='write test failures to junit xml files')
parser.add_argument('--failure-ok',
action='store_true',
help='exit successfully on failed tests after saving results')
def add_changes(parser, argparse):
"""
:type parser: argparse.ArgumentParser
:type argparse: argparse
"""
parser.add_argument('--changed', action='store_true', help='limit targets based on changes')
changes = parser.add_argument_group(title='change detection arguments')
changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS)
changes.add_argument('--untracked', action='store_true', help='include untracked files')
changes.add_argument('--ignore-committed', dest='committed', action='store_false', help='exclude committed files')
changes.add_argument('--ignore-staged', dest='staged', action='store_false', help='exclude staged files')
changes.add_argument('--ignore-unstaged', dest='unstaged', action='store_false', help='exclude unstaged files')
changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS)
changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
def add_environments(parser, tox_version=False, tox_only=False):
"""
:type parser: argparse.ArgumentParser
:type tox_version: bool
:type tox_only: bool
"""
parser.add_argument('--requirements',
action='store_true',
help='install command requirements')
environments = parser.add_mutually_exclusive_group()
environments.add_argument('--local',
action='store_true',
help='run from the local environment')
if tox_version:
environments.add_argument('--tox',
metavar='VERSION',
nargs='?',
default=None,
const='.'.join(str(i) for i in sys.version_info[:2]),
choices=SUPPORTED_PYTHON_VERSIONS,
help='run from a tox virtualenv: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
else:
environments.add_argument('--tox',
action='store_true',
help='run from a tox virtualenv')
tox = parser.add_argument_group(title='tox arguments')
tox.add_argument('--tox-sitepackages',
action='store_true',
help='allow access to globally installed packages')
if tox_only:
environments.set_defaults(
docker=None,
remote=None,
remote_stage=None,
remote_provider=None,
remote_aws_region=None,
remote_terminate=None,
)
return
environments.add_argument('--docker',
metavar='IMAGE',
nargs='?',
default=None,
const='default',
help='run from a docker container').completer = complete_docker
environments.add_argument('--remote',
metavar='PLATFORM',
default=None,
help='run from a remote instance').completer = complete_remote_shell if parser.prog.endswith(' shell') else complete_remote
remote = parser.add_argument_group(title='remote arguments')
remote.add_argument('--remote-stage',
metavar='STAGE',
help='remote stage to use: %(choices)s',
choices=['prod', 'dev'],
default='prod')
remote.add_argument('--remote-provider',
metavar='PROVIDER',
help='remote provider to use: %(choices)s',
choices=['default', 'aws', 'azure', 'parallels'],
default='default')
remote.add_argument('--remote-aws-region',
metavar='REGION',
help='remote aws region to use: %(choices)s (default: auto)',
choices=sorted(AWS_ENDPOINTS),
default=None)
remote.add_argument('--remote-terminate',
metavar='WHEN',
help='terminate remote instance: %(choices)s (default: %(default)s)',
choices=['never', 'always', 'success'],
default='never')
def add_extra_coverage_options(parser):
"""
:type parser: argparse.ArgumentParser
"""
parser.add_argument('--group-by',
metavar='GROUP',
action='append',
choices=lib.cover.COVERAGE_GROUPS,
help='group output by: %s' % ', '.join(lib.cover.COVERAGE_GROUPS))
parser.add_argument('--all',
action='store_true',
help='include all python source files')
parser.add_argument('--stub',
action='store_true',
help='generate empty report of all python source files')
def add_httptester_options(parser, argparse):
"""
:type parser: argparse.ArgumentParser
:type argparse: argparse
"""
group = parser.add_mutually_exclusive_group()
group.add_argument('--httptester',
metavar='IMAGE',
default='quay.io/ansible/http-test-container:1.0.0',
help='docker image to use for the httptester container')
group.add_argument('--disable-httptester',
dest='httptester',
action='store_const',
const='',
help='do not use the httptester container')
parser.add_argument('--inject-httptester',
action='store_true',
help=argparse.SUPPRESS) # internal use only
def add_extra_docker_options(parser, integration=True):
"""
:type parser: argparse.ArgumentParser
:type integration: bool
"""
docker = parser.add_argument_group(title='docker arguments')
docker.add_argument('--docker-no-pull',
action='store_false',
dest='docker_pull',
help='do not explicitly pull the latest docker images')
docker.add_argument('--docker-keep-git',
action='store_true',
help='transfer git related files into the docker container')
docker.add_argument('--docker-seccomp',
metavar='SC',
choices=('default', 'unconfined'),
default=None,
help='set seccomp confinement for the test container: %(choices)s')
if not integration:
return
docker.add_argument('--docker-privileged',
action='store_true',
help='run docker container in privileged mode')
docker.add_argument('--docker-memory',
help='memory limit for docker in bytes', type=int)
def complete_target(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
return find_target_completion(parsed_args.targets, prefix)
def complete_remote(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
with open('test/runner/completion/remote.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
return [i for i in images if i.startswith(prefix)]
def complete_remote_shell(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
with open('test/runner/completion/remote.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
# 2008 doesn't support SSH, so we do not add it to the list of valid images
with open('test/runner/completion/windows.txt', 'r') as completion_fd:
images.extend(["windows/%s" % i for i in completion_fd.read().splitlines() if i != '2008'])
return [i for i in images if i.startswith(prefix)]
def complete_docker(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
images = sorted(get_docker_completion().keys())
return [i for i in images if i.startswith(prefix)]
def complete_windows(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
with open('test/runner/completion/windows.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
return [i for i in images if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
def complete_network_platform(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
with open('test/runner/completion/network.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
def complete_network_testcase(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
testcases = []
# since testcases are module specific, don't autocomplete if more than one
# module is specified
if len(parsed_args.include) != 1:
return []
test_dir = 'test/integration/targets/%s/tests' % parsed_args.include[0]
connections = os.listdir(test_dir)
for conn in connections:
if os.path.isdir(os.path.join(test_dir, conn)):
for testcase in os.listdir(os.path.join(test_dir, conn)):
if testcase.startswith(prefix):
testcases.append(testcase.split('.')[0])
return testcases
def complete_sanity_test(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
tests = sorted(t.name for t in sanity_get_tests())
return [i for i in tests if i.startswith(prefix)]
if __name__ == '__main__':
main()
|
stuarteberg/numpy
|
refs/heads/master
|
numpy/ma/tests/test_core.py
|
16
|
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
import itertools
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
TestCase, run_module_suite, assert_raises, assert_warns, suppress_warnings)
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
suppress_copy_mask_on_assignment = suppress_warnings()
suppress_copy_mask_on_assignment.filter(
numpy.ma.core.MaskedArrayFutureWarning,
"setting an item on a masked array which has a shared mask will not copy")
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
x = array([1, 2, 3], mask=True)
assert_equal(x._mask, [True, True, True])
x = array([1, 2, 3], mask=False)
assert_equal(x._mask, [False, False, False])
y = array([1, 2, 3], mask=x._mask, copy=False)
assert_(np.may_share_memory(x.mask, y.mask))
y = array([1, 2, 3], mask=x._mask, copy=True)
assert_(not np.may_share_memory(x.mask, y.mask))
def test_creation_with_list_of_maskedarrays(self):
# Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_creation_from_ndarray_with_padding(self):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
'formats':['S4','i8'],
'offsets':[0,8]})
data = array(x) # used to fail due to 'V' padding field in x.dtype.descr
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_asarray_default_order(self):
# See Issue #6646
m = np.eye(3).T
self.assertFalse(m.flags.c_contiguous)
new_m = asarray(m)
self.assertTrue(new_m.flags.c_contiguous)
def test_asarray_enforce_order(self):
# See Issue #6646
m = np.eye(3).T
self.assertFalse(m.flags.c_contiguous)
new_m = asarray(m, order='C')
self.assertTrue(new_m.flags.c_contiguous)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
@suppress_copy_mask_on_assignment
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_copy_immutable(self):
# Tests that the copy method is immutable, GitHub issue #5247
a = np.ma.array([1, 2, 3])
b = np.ma.array([4, 5, 6])
a_copy_method = a.copy
b.copy  # attribute access alone must not rebind a_copy_method (gh-5247)
assert_equal(a_copy_method(), [1, 2, 3])
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
a = np.ma.arange(2000)
a[1:50] = np.ma.masked
assert_equal(
repr(a),
'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
' mask = [False True True ..., False False False],\n'
' fill_value = 999999)\n'
)
def test_pickling(self):
# Tests pickling
for dtype in (int, float, str, object):
a = arange(10).astype(dtype)
a.fill_value = 999
masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked
True, # Fully masked
False) # Fully unmasked
for mask in masks:
a.mask = mask
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
if dtype in (object, int):
assert_equal(a_pickled.fill_value, 999)
else:
assert_equal(a_pickled.fill_value, dtype(999))
assert_array_equal(a_pickled.mask, mask)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with suppress_warnings() as sup:
sup.filter(UserWarning, 'Warning: converting a masked element')
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda: float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
@suppress_copy_mask_on_assignment
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_with_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_with_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_with_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_with_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
# test if mask gets set correctly (see #6760)
Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
('f1', 'i1', (2, 2))], (2, 2))]))
assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
('f1', '?', (2, 2))], (2, 2))]))
def test_filled_with_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
# Test 0-d array with multi-dimensional dtype
t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
0.0),
mask = (False, [[True, False, True],
[False, False, True]],
False),
dtype = "int, (2,3)float, float")
control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
assert_equal(str(t_2d0), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
# exotic dtype
A = masked_array(data=[([0,1],)],
mask=[([True, False],)],
dtype=[("A", ">i2", (2,))])
assert_equal(A[0]["A"], A["A"][0])
assert_equal(A[0]["A"], masked_array(data=[0, 1],
mask=[True, False], dtype=">i2"))
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
        # Test printing an mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
# also check if there are object datatypes (see gh-7493)
mx = array([(1,), (2,)], dtype=[('a', 'O')])
assert_equal(str(mx[0]), "(1,)")
def test_mvoid_multidim_print(self):
# regression test for gh-6019
        t_ma = masked_array(data=[([1, 2, 3],)],
                            mask=[([False, True, False],)],
                            fill_value=([999999, 999999, 999999],),
                            dtype=[('a', '<i4', (3,))])
assert_(str(t_ma[0]) == "([1, --, 3],)")
assert_(repr(t_ma[0]) == "([1, --, 3],)")
# additional tests with structured arrays
        t_2d = masked_array(data=[([[1, 2], [3, 4]],)],
                            mask=[([[False, True], [True, False]],)],
                            dtype=[('a', '<i4', (2, 2))])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
        t_0d = masked_array(data=[(1, 2)],
                            mask=[(True, False)],
                            dtype=[('a', '<i4'), ('b', '<i4')])
assert_(str(t_0d[0]) == "(--, 2)")
assert_(repr(t_0d[0]) == "(--, 2)")
        t_2d = masked_array(data=[([[1, 2], [3, 4]], 1)],
                            mask=[([[False, True], [True, False]], False)],
                            dtype=[('a', '<i4', (2, 2)), ('b', float)])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
        t_ne = masked_array(data=[(1, (1, 1))],
                            mask=[(True, (True, False))],
                            dtype=[('a', '<i4'), ('b', 'i4,i4')])
assert_(str(t_ne[0]) == "(--, (--, 1))")
assert_(repr(t_ne[0]) == "(--, (--, 1))")
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert_(mx[0] is mx1)
assert_(mx[1] is not mx2)
assert_(np.all(mx[1].data == mx2.data))
assert_(np.all(mx[1].mask))
# check that we return a view.
mx[1].data[0] = 0.
assert_(mx2[0] == 0.)
class TestMaskedArrayArithmetic(TestCase):
    # Test class for MaskedArray arithmetic.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
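        # `2 / a` is masked where the result would exceed the safe-divide
        # threshold (position 0, since 2/tiny approaches 1/tiny) and where
        # the divisor is zero (position 2); `a / 2` stays fully unmasked.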
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
# Tests (in)equality on masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
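        # An array op the masked singleton should broadcast: the result
        # keeps the array's shape with every entry masked, rather than
        # collapsing to the 0-d `masked` constant.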
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(ValueError, ott.count, axis=1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # builtin max/min require 1-D input, so compare on raveled arrays
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
        # Test np.maximum.reduce on an array w/ a full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
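        # An integer `out` buffer cannot represent a masked result, so the
        # np version may raise MaskError below; a float buffer works, and
        # the very buffer object that was passed in must be returned.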
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
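        # Broadcasting a (3, 1) operand against (3, 2): a masked row masks
        # the whole result row, and masked result entries keep the left
        # operand's data rather than a freshly computed value.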
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
def test_ufunc_nomask(self):
        # Check the case where ufuncs should set the mask to False
m = np.ma.array([1])
# check we don't get array([False], dtype=bool)
assert_equal(np.true_divide(m, 5).mask.shape, ())
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
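        # The result mask is the union of both operand masks plus the
        # positions where the divisor is zero (the mod domain).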
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
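        # Check that reduction methods honour an explicit `out` argument:
        # the returned object must be the buffer that was passed in.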
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_count_mean_with_matrix(self):
        m = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
        assert_equal(m.count(axis=0).shape, (1, 2))
        assert_equal(m.count(axis=1).shape, (2, 1))
        # Make sure broadcasting inside mean and var works
        assert_equal(m.mean(axis=0), [[2., 3.]])
        assert_equal(m.mean(axis=1), [[1.5], [3.5]])
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
        # Test the inequality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_with_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# Deprecation is in place for arrays, and when it happens this
# test will fail (and have to be changed accordingly).
# With partial mask
with suppress_warnings() as sup:
sup.filter(FutureWarning, "Comparison to `None`")
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_with_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
        # Tests the keep_mask flag
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
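        # With hard_mask=True, assignment cannot unmask or overwrite masked
        # entries; with a soft mask (the default), assigning to a masked
        # entry clears its mask. Illustrative sketch:
        #   >>> a = array([1, 2], mask=[1, 0], hard_mask=True)
        #   >>> a[0] = 99   # ignored: a[0] stays masked, data unchanged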
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
# We need to copy, to avoid updating d in xh !
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask returns self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
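        # When _smallmask is True (the default), a mask with no masked
        # entries is collapsed back to nomask; with _smallmask=False the
        # full boolean mask array is kept.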
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
# suppress deprecation warning in 1.12 (remove in 1.13)
with assert_warns(FutureWarning):
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
        # Use a tailored _optinfo comment to make sure special attributes
        # are properly propagated
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
# but when indexing, fill value should become scalar not tuple
# See issue #6723
M = masked_array(control)
assert_equal(M["f1"].fill_value.ndim, 0)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
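        # minimum_fill_value returns the largest representable value per
        # field (the identity for a min reduction); maximum_fill_value
        # returns the smallest.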
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
# Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
def test_fillvalue_bytes_or_str(self):
# Test whether fill values work as expected for structured dtypes
# containing bytes or str. See issue #7259.
a = empty(shape=(3, ), dtype="(2)3S,(2)3U")
assert_equal(a["f0"].fill_value, default_fill_value(b"spam"))
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
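        # Each ma ufunc should agree with its numpy counterpart on the
        # unmasked entries and produce an identical mask.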
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
        # Check that the mask of the result is an ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
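        # Returning NotImplemented lets Python fall back to the reflected
        # method of the operand with the higher __array_priority__.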
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
def test_no_masked_nan_warnings(self):
# check that a nan in masked position does not
# cause ufunc warnings
m = np.ma.array([0.5, np.nan], mask=[0,1])
with warnings.catch_warnings():
warnings.filterwarnings("error")
# test unary and binary ufuncs
exp(m)
add(m, 1)
m > 0
# test different unary domains
sqrt(m)
log(m)
tan(m)
arcsin(m)
arccos(m)
arccosh(m)
# test binary domains
divide(m, 2)
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
class TestMaskedArrayInPlaceArithmetics(TestCase):
    # Test MaskedArray in-place arithmetic
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
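        # arange(10) starts at 0, so index 0 of xm gets masked by the
        # division domain; assert_equal ignores masked positions, and the
        # remaining entries are k/k == 1.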
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(xm._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
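        # "Data friendly" means that wherever the result is masked, the
        # underlying .data keeps the left operand's original value instead
        # of an arbitrary computed one.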
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with suppress_warnings() as sup:
sup.record(UserWarning)
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
if issubclass(t, np.integer):
assert_equal(len(sup.log), 2, "Failed on type=%s." % t)
else:
assert_equal(len(sup.log), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with suppress_warnings() as sup:
sup.record(UserWarning)
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
if issubclass(t, np.integer):
assert_equal(len(sup.log), 2, "Failed on type=%s." % t)
else:
assert_equal(len(sup.log), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
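        # Reductions of a fully masked array return the `masked` singleton,
        # but with an explicit `out` the buffer must be written (data and
        # mask) and returned instead.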
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
@suppress_copy_mask_on_assignment
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
        # assert_equal crashes when passed np.ma.masked
self.assertIs(x[1], np.ma.masked)
self.assertIs(x.take(1), np.ma.masked)
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
# Tests to list
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
assert_equal(np.trace(mX), mX.trace())
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, out=r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, out=r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varmean_nomask(self):
# gh-5769
foo = array([1,2,3,4], dtype='f8')
bar = array([1,2,3,4], dtype='f8')
assert_equal(type(foo.mean()), np.float64)
assert_equal(type(foo.var()), np.float64)
assert((foo.mean() == bar.mean()) is np.bool_(True))
# check array type is preserved and out works
foo = array(np.arange(16).reshape((4,4)), dtype='f8')
bar = empty(4, dtype='f4')
assert_equal(type(foo.mean(axis=1)), MaskedArray)
assert_equal(type(foo.var(axis=1)), MaskedArray)
assert_(foo.mean(axis=1, out=bar) is bar)
assert_(foo.var(axis=1, out=bar) is bar)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
@suppress_copy_mask_on_assignment
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
    # Test class for MaskedArray math methods on complex data.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_with_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in a ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
# Named+ shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
# test that nomask is returned when m is nomask.
bools = [True, False]
dtypes = [MaskType, np.float]
msgformat = 'copy=%s, shrink=%s, dtype=%s'
for cpy, shr, dt in itertools.product(bools, bools, dtypes):
res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
assert_(res is nomask, msgformat % (cpy, shr, dt))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
# Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
# Test that compress flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
# with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
def test_convolve(self):
a = masked_equal(np.arange(5), 2)
b = np.array([1, 1])
test = np.ma.convolve(a, b)
assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1))
test = np.ma.convolve(a, b, propagate_mask=False)
assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1))
test = np.ma.convolve([1, 1], [1, 1, 1])
assert_equal(test, masked_equal([1, 2, 2, 1], -1))
a = [1, 1]
b = masked_equal([1, -1, -1, 1], -1)
test = np.ma.convolve(a, b, propagate_mask=False)
assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1))
test = np.ma.convolve(a, b, propagate_mask=True)
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a subdtype w/ shape
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
class TestOptionalArgs(TestCase):
def test_ndarrayfuncs(self):
        # test axis arg behaves the same as ndarray (including multiple axes)
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
# mask out last element of last dimension
m[:,:,-1] = True
a = np.ma.array(d, mask=m)
def testaxis(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test axis arg
assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1))
assert_equal(ma_f(a, axis=(0,1))[...,:-1],
numpy_f(d[...,:-1], axis=(0,1)))
def testkeepdims(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test keepdims arg
assert_equal(ma_f(a, keepdims=True).shape,
numpy_f(d, keepdims=True).shape)
assert_equal(ma_f(a, keepdims=False).shape,
numpy_f(d, keepdims=False).shape)
# test both at once
assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=1, keepdims=True))
assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=(0,1), keepdims=True))
for f in ['sum', 'prod', 'mean', 'var', 'std']:
testaxis(f, a, d)
testkeepdims(f, a, d)
for f in ['min', 'max']:
testaxis(f, a, d)
d = (np.arange(24).reshape((2,3,4))%2 == 0)
a = np.ma.array(d, mask=m)
for f in ['all', 'any']:
testaxis(f, a, d)
testkeepdims(f, a, d)
def test_count(self):
# test np.ma.count specially
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
m[:,0,:] = True
a = np.ma.array(d, mask=m)
assert_equal(count(a), 16)
assert_equal(count(a, axis=1), 2*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 4*ones((4,)))
assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))
assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))
assert_equal(count(a, axis=-2), 2*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(ValueError, count, a, axis=3)
# check the 'nomask' path
a = np.ma.array(d, mask=nomask)
assert_equal(count(a), 24)
assert_equal(count(a, axis=1), 3*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 6*ones((4,)))
assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))
assert_equal(np.ndim(count(a, keepdims=True)), 3)
assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))
assert_equal(count(a, axis=-2), 3*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(ValueError, count, a, axis=3)
# check the 'masked' singleton
assert_equal(count(np.ma.masked), 0)
# check 0-d arrays do not allow axis > 0
assert_raises(ValueError, count, np.ma.array(1), axis=1)
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
assert_array_equal(result.data[-3], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
###############################################################################
if __name__ == "__main__":
run_module_suite()
|
seshurajup/phrasal
|
refs/heads/master
|
ptm/scripts/human_eval/mfas_solver.py
|
10
|
#!/usr/bin/python
import sys
from collections import defaultdict
from collections import namedtuple
# Utility functions
def bitmap(sequence):
""" Generate a coverage bitmap for a sequence of indexes """
return reduce(lambda x,y: x|y, [long('1'+'0'*i,2) for i in sequence], 0)
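# Illustrative check, not part of the original script: bit i of the result is
# set for each index i in the input (LSB first), so bitmap([0, 2]) == 0b101 == 5
# and bitmap([]) == 0.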
def bitmap2str(b, n, on='o', off='.'):
""" Generate a length-n string representation of bitmap b """
return '' if n==0 else (on if b&1==1 else off) + bitmap2str(b>>1, n-1, on, off)
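# Illustrative check (assumed, not from the original): the string reads LSB
# first, so bitmap2str(5, 4) == 'o.o.'.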
def indexes(b, n=0):
""" Generate a list of indexes that are turned on in a bitmap b """
return [] if b==0 else ([n] if b&1==1 else []) + indexes(b>>1, n+1)
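# Illustrative check (assumed): indexes(5) == [0, 2], i.e. the inverse of
# bitmap() above.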
#pairwise_score = defaultdict(int)
#vertices = set()
#for line in sys.stdin:
# (count, sys1, sys2, order) = line.strip().split()
# pairwise_score[(sys1,sys2)] += int(count) if order == "<" else -int(count)
# vertices.add(sys1)
# vertices.add(sys2)
def lopez_solver(pairwise_score, vertices):
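    """Rank systems via best-first search over placement states.

    A hedged reading of the code below, not the author's own description:
    each search state is a bitmap of vertices already placed; extending a
    state with vertex u costs the total positive pairwise weight from u to
    the still-unplaced vertices, so the cheapest path to the all-ones
    bitmap yields an ordering that minimizes the weight of violated
    pairwise preferences (a minimum feedback arc set style objective).
    """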
tournament = defaultdict(dict)
for (u, v), weight in pairwise_score.iteritems():
if weight > 0:
tournament[u][v] = weight
vertices = list(vertices)
hypothesis = namedtuple("hypothesis", "cost, state, predecessor, vertex")
initial_hypothesis = hypothesis(0, 0, None, None)
agenda = {}
agenda[0] = initial_hypothesis
goal = bitmap(xrange(len(vertices)))
while len(agenda) > 0:
h = sorted(agenda.itervalues(), key=lambda h: h.cost)[0]
if h.state == goal:
break
#print bitmap2str(h.state,len(vertices),on='1',off='0'),h.cost
del agenda[h.state]
for u in indexes(goal^h.state):
new_state = h.state | bitmap([u])
added_cost = 0
for v in indexes(goal^new_state):
if vertices[v] in tournament[vertices[u]]:
added_cost += tournament[vertices[u]][vertices[v]]
new_h = hypothesis(h.cost + added_cost, new_state, h, vertices[u])
if new_state not in agenda or agenda[new_state].cost > new_h.cost:
agenda[new_state] = new_h
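    # Reconstruct the ordering by walking predecessor links back from the
    # goal hypothesis; vertices come out in reverse order of placement.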
ranking = []
while h.vertex:
ranking.append(h.vertex)
h = h.predecessor
return ranking
#def extract_ranking(h):
#return "" if h.predecessor is None else "%s%s\n" % (extract_ranking(h.predecessor), h.vertex)
#sys.stdout.write(extract_ranking(h))
|
jcpowermac/ansible-modules-extras
|
refs/heads/devel
|
cloud/amazon/ec2_vpc_net_facts.py
|
6
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net_facts
short_description: Gather facts about ec2 VPCs in AWS
description:
- Gather facts about ec2 VPCs in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all VPCs
- ec2_vpc_net_facts:
# Gather facts about a particular VPC using VPC ID
- ec2_vpc_net_facts:
filters:
vpc-id: vpc-00112233
# Gather facts about any VPC with a tag key Name and value Example
- ec2_vpc_net_facts:
filters:
"tag:Name": Example
'''
try:
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_vpc_info(vpc):
try:
classic_link = vpc.classic_link_enabled
except AttributeError:
classic_link = False
vpc_info = { 'id': vpc.id,
'instance_tenancy': vpc.instance_tenancy,
'classic_link_enabled': classic_link,
'dhcp_options_id': vpc.dhcp_options_id,
'state': vpc.state,
'is_default': vpc.is_default,
'cidr_block': vpc.cidr_block,
'tags': vpc.tags
}
return vpc_info
def list_ec2_vpcs(connection, module):
filters = module.params.get("filters")
vpc_dict_array = []
try:
all_vpcs = connection.get_all_vpcs(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for vpc in all_vpcs:
vpc_dict_array.append(get_vpc_info(vpc))
module.exit_json(vpcs=vpc_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_ec2_vpcs(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
trangel/Insight-Data-Science
|
refs/heads/master
|
flaskapp/__init__.py
|
1
|
from flask import Flask
app = Flask(__name__)
from flaskapp import views
|
Lh4cKg/brython
|
refs/heads/master
|
www/src/Lib/test/unittests/test_with.py
|
83
|
#!/usr/bin/env python3
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import _GeneratorContextManager, contextmanager
from test.support import run_unittest
class MockContextManager(_GeneratorContextManager):
def __init__(self, func, *args, **kwds):
super().__init__(func, *args, **kwds)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return _GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return _GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func, *args, **kwds)
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0](ex[1]).with_traceback(ex[2])
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
def setUp(self):
self.TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager,
exc_type=None):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
if exc_type is None:
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
exc_type = type(self.TEST_EXCEPTION)
self.assertEqual(mock_manager.exit_args[0], exc_type)
# Test the __exit__ arguments. Issue #7853
self.assertIsInstance(mock_manager.exit_args[1], exc_type)
self.assertIsNot(mock_manager.exit_args[2], None)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(ContextmanagerAssertionMixin, unittest.TestCase):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testExceptionNormalized(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
# Note this relies on the fact that 1 // 0 produces an exception
# that is not normalized immediately.
1 // 0
self.assertRaises(ZeroDivisionError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm, ZeroDivisionError)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise next(iter([]))
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __bool__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as list(targets.values())[0][1]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = list(targets.keys())
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (list(targets.values())[0][2], list(targets.values())[0][1], list(targets.values())[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
class NestedWith(unittest.TestCase):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
class InitRaises(object):
def __init__(self): raise RuntimeError()
class EnterRaises(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
class ExitRaises(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
def testNoExceptions(self):
with self.Dummy() as a, self.Dummy() as b:
self.assertTrue(a.enter_called)
self.assertTrue(b.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(b.exit_called)
def testExceptionInExprList(self):
try:
with self.Dummy() as a, self.InitRaises():
pass
except:
pass
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInEnter(self):
try:
with self.Dummy() as a, self.EnterRaises():
self.fail('body of bad with executed')
except RuntimeError:
pass
else:
self.fail('RuntimeError not reraised')
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInExit(self):
body_executed = False
with self.Dummy(gobble=True) as a, self.ExitRaises():
body_executed = True
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(body_executed)
self.assertNotEqual(a.exc_info[0], None)
def testEnterReturnsTuple(self):
with self.Dummy(value=(1,2)) as (a1, a2), \
self.Dummy(value=(10, 20)) as (b1, b2):
self.assertEqual(1, a1)
self.assertEqual(2, a2)
self.assertEqual(10, b1)
self.assertEqual(20, b2)
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase,
NestedWith)
if __name__ == '__main__':
test_main()
|
netman92/coala
|
refs/heads/master
|
tests/output/InteractionsTest.py
|
25
|
import unittest
from pyprint.NullPrinter import NullPrinter
from coalib.output.Interactions import fail_acquire_settings
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.settings.Section import Section
class InteractionsTest(unittest.TestCase):
def test_(self):
log_printer = LogPrinter(NullPrinter())
section = Section('')
self.assertRaises(TypeError, fail_acquire_settings, log_printer, None,
section)
self.assertRaises(AssertionError,
fail_acquire_settings,
log_printer,
{'setting': ['description', 'bear']}, section)
self.assertEqual(fail_acquire_settings(log_printer, {}, section), None,
section)
|
hagabbar/pycbc_copy
|
refs/heads/master
|
pycbc/fft/cufft.py
|
2
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides the cufft backend of the fast Fourier transform
for the PyCBC package.
"""
import pycbc.scheme
import skcuda.fft as cu_fft
from .core import _BaseFFT, _BaseIFFT
_forward_plans = {}
_reverse_plans = {}
#These dicts need to be cleared before the cuda context is destroyed
def _clear_plan_dicts():
_forward_plans.clear()
_reverse_plans.clear()
pycbc.scheme.register_clean_cuda(_clear_plan_dicts)
#itype and otype are actual dtypes here, not strings
def _get_fwd_plan(itype, otype, inlen, batch=1):
try:
theplan = _forward_plans[(itype, otype, inlen, batch)]
except KeyError:
theplan = cu_fft.Plan((inlen,), itype, otype, batch=batch)
        # Cache under the same key used for the lookup above (including batch),
        # otherwise the cached plan is never reused.
        _forward_plans[(itype, otype, inlen, batch)] = theplan
return theplan
#The complex to real plan wants the actual size, not the N/2+1
#That's why the inverse plans use the outvec length, instead of the invec
def _get_inv_plan(itype, otype, outlen, batch=1):
try:
theplan = _reverse_plans[(itype, otype, outlen, batch)]
except KeyError:
theplan = cu_fft.Plan((outlen,), itype, otype, batch=batch)
        # As above, the cache key must include batch to match the lookup.
        _reverse_plans[(itype, otype, outlen, batch)] = theplan
return theplan
def fft(invec, outvec, prec, itype, otype):
cuplan = _get_fwd_plan(invec.dtype, outvec.dtype, len(invec))
cu_fft.fft(invec.data, outvec.data, cuplan)
def ifft(invec, outvec, prec, itype, otype):
cuplan = _get_inv_plan(invec.dtype, outvec.dtype, len(outvec))
cu_fft.ifft(invec.data, outvec.data, cuplan)
class FFT(_BaseFFT):
def __init__(self, invec, outvec, nbatch=1, size=None):
super(FFT, self).__init__(invec, outvec, nbatch, size)
self.plan = _get_fwd_plan(invec.dtype, outvec.dtype, len(invec), batch=nbatch)
self.invec = invec.data
self.outvec = outvec.data
def execute(self):
cu_fft.fft(self.invec, self.outvec, self.plan)
class IFFT(_BaseIFFT):
def __init__(self, invec, outvec, nbatch=1, size=None):
super(IFFT, self).__init__(invec, outvec, nbatch, size)
self.plan = _get_inv_plan(invec.dtype, outvec.dtype, len(outvec), batch=nbatch)
self.invec = invec.data
self.outvec = outvec.data
def execute(self):
cu_fft.ifft(self.invec, self.outvec, self.plan)
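# A minimal usage sketch (not part of the original module; assumes a CUDA
# scheme is active and uses hypothetical pycbc.types arrays):
#
#     from pycbc.types import zeros, float32, complex64
#     invec = zeros(4096, dtype=float32)
#     outvec = zeros(4096 // 2 + 1, dtype=complex64)
#     fft_plan = FFT(invec, outvec)   # builds (or reuses) a cached cufft plan
#     fft_plan.execute()              # runs the forward transform on the GPU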
|
gnuhub/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/qualifiedImport/before/src/a.py
|
415
|
def f(x):
return x
|
htzy/bigfour
|
refs/heads/master
|
lms/djangoapps/courseware/management/commands/regrade_partial.py
|
38
|
'''
This is a one-off command aimed at fixing a temporary problem encountered where partial credit was awarded for
code problems, but the resulting score (or grade) was mistakenly set to zero because of a bug in
CorrectMap.get_npoints().
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from courseware.models import StudentModule
from capa.correctmap import CorrectMap
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
'''
The fix here is to recalculate the score/grade based on the partial credit.
To narrow down the set of problems that might need fixing, the StudentModule
objects to be checked is filtered down to those:
created < '2013-03-08 15:45:00' (the problem must have been answered before the fix was installed,
on Prod and Edge)
modified > '2013-03-07 20:18:00' (the problem must have been visited after the bug was introduced)
state like '%"npoints": 0.%' (the problem must have some form of partial credit).
'''
num_visited = 0
num_changed = 0
option_list = BaseCommand.option_list + (
make_option('--save',
action='store_true',
dest='save_changes',
default=False,
help='Persist the changes that were encountered. If not set, no changes are saved.'), )
def fix_studentmodules(self, save_changes):
'''Identify the list of StudentModule objects that might need fixing, and then fix each one'''
modules = StudentModule.objects.filter(modified__gt='2013-03-07 20:18:00',
created__lt='2013-03-08 15:45:00',
state__contains='"npoints": 0.')
for module in modules:
self.fix_studentmodule_grade(module, save_changes)
def fix_studentmodule_grade(self, module, save_changes):
''' Fix the grade assigned to a StudentModule'''
module_state = module.state
if module_state is None:
# not likely, since we filter on it. But in general...
LOG.info(
u"No state found for %s module %s for student %s in course %s",
module.module_type,
module.module_state_key,
module.student.username,
module.course_id,
)
return
state_dict = json.loads(module_state)
self.num_visited += 1
# LoncapaProblem.get_score() checks student_answers -- if there are none, we will return a grade of 0
# Check that this is the case, but do so sooner, before we do any of the other grading work.
student_answers = state_dict['student_answers']
if (not student_answers) or len(student_answers) == 0:
# we should not have a grade here:
if module.grade != 0:
log_msg = (
u"No answer found but grade %(grade)s exists for %(type)s module %(id)s for student %(student)s " +
u"in course %(course_id)s"
)
LOG.error(log_msg, {
"grade": module.grade,
"type": module.module_type,
"id": module.module_state_key,
"student": module.student.username,
"course_id": module.course_id,
})
else:
log_msg = (
u"No answer and no grade found for %(type)s module %(id)s for student %(student)s " +
u"in course %(course_id)s"
)
LOG.debug(log_msg, {
"grade": module.grade,
"type": module.module_type,
"id": module.module_state_key,
"student": module.student.username,
"course_id": module.course_id,
})
return
# load into a CorrectMap, as done in LoncapaProblem.__init__():
correct_map = CorrectMap()
if 'correct_map' in state_dict:
correct_map.set_dict(state_dict['correct_map'])
# calculate score the way LoncapaProblem.get_score() works, by deferring to
# CorrectMap's get_npoints implementation.
correct = 0
for key in correct_map:
correct += correct_map.get_npoints(key)
if module.grade == correct:
# nothing to change
log_msg = u"Grade matches for %(type)s module %(id)s for student %(student)s in course %(course_id)s"
LOG.debug(log_msg, {
"type": module.module_type,
"id": module.module_state_key,
"student": module.student.username,
"course_id": module.course_id,
})
elif save_changes:
# make the change
log_msg = (
u"Grade changing from %(grade)s to %(correct)s for %(type)s module " +
u"%(id)s for student %(student)s in course %(course_id)s"
)
LOG.debug(log_msg, {
"grade": module.grade,
"correct": correct,
"type": module.module_type,
"id": module.module_state_key,
"student": module.student.username,
"course_id": module.course_id,
})
module.grade = correct
module.save()
self.num_changed += 1
else:
# don't make the change, but log that the change would be made
log_msg = (
u"Grade would change from %(grade)s to %(correct)s for %(type)s module %(id)s for student " +
u"%(student)s in course %(course_id)s"
)
LOG.debug(log_msg, {
"grade": module.grade,
"correct": correct,
"type": module.module_type,
"id": module.module_state_key,
"student": module.student.username,
"course_id": module.course_id,
})
self.num_changed += 1
def handle(self, **options):
'''Handle management command request'''
save_changes = options['save_changes']
LOG.info("Starting run: save_changes = {0}".format(save_changes))
self.fix_studentmodules(save_changes)
LOG.info("Finished run: updating {0} of {1} modules".format(self.num_changed, self.num_visited))
|
patrickspencer/compass
|
refs/heads/master
|
scripts/cli_shortcuts/setup.py
|
2
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from setuptools import setup
readme = open('README.rst').read()
setup(
name = 'cli-shortcuts',
version = '1.5',
description = "You spend way too much time typing 'python manage.py'",
long_description = readme,
author = "Johannes Gorset, Patrick Spencer",
author_email = "jgorset@gmail.com, patrick.spencer@mail.mizzou.edu",
py_modules = ['cli_shortcuts'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
entry_points={
'console_scripts': [
'dj = cli_shortcuts:main',
]
},
)
|
cancan101/StarCluster
|
refs/heads/remove_hacks_squash
|
starcluster/commands/spothistory.py
|
4
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
from datetime import timedelta
from starcluster import utils
from starcluster import static
from base import CmdBase
class CmdSpotHistory(CmdBase):
"""
spothistory [options] <instance_type>
Show spot instance pricing history stats (last 30 days by default)
Examples:
Show the current, max, and average spot price for m1.small instance type:
$ starcluster spothistory m1.small
Do the same but also plot the spot history over time in a web browser:
$ starcluster spothistory -p m1.small
Show it based on a current cluster config and zone
    $ starcluster spothistory --cluster-name <cluster-name>
"""
names = ['spothistory', 'shi']
def addopts(self, parser):
parser.add_option("-z", "--zone", dest="zone", default=None,
help="limit results to specific availability zone")
parser.add_option("-d", "--days", dest="days_ago",
action="store", type="float", default=None,
help="provide history in the last DAYS_AGO days "
"(overrides -s option)")
parser.add_option("-s", "--start-time", dest="start_time",
action="callback", type="string", default=None,
callback=self._iso_timestamp,
help="show price history after START_TIME (UTC)"
"(e.g. 2010-01-15T22:22:22Z)")
parser.add_option("-e", "--end-time", dest="end_time",
action="callback", type="string", default=None,
callback=self._iso_timestamp,
help="show price history up until END_TIME (UTC)"
"(e.g. 2010-02-15T22:22:22Z)")
parser.add_option("-p", "--plot", dest="plot",
action="store_true", default=False,
help="plot spot history in a web browser")
parser.add_option("--cluster-name", dest="cluster_name",
default=None,
help="limit results to the clusters master node "
"availability zone")
parser.add_option("-v", "--vpc", dest="vpc",
action="store_true", default=False,
help="show spot prices for VPC")
parser.add_option("-c", "--classic", dest="classic",
action="store_true", default=False,
help="show spot prices for EC2-Classic")
def execute(self, args):
instance_types = ', '.join(static.INSTANCE_TYPES.keys())
zone = None
instance_type = None
if self.opts.cluster_name:
cl = self.cm.get_cluster(self.opts.cluster_name,
require_keys=False)
instance_type = cl.node_instance_type
zone = cl.nodes[0].placement
self.log.info("Cluster zone: " + zone)
self.log.info("Cluster node instance type: " + instance_type)
if self.opts.zone:
if zone:
self.log.info("You specified a zone and a cluster to get the "
"zone from. Using the cluster zone.")
else:
zone = self.opts.zone
self.log.info("Specified zone: " + zone)
if instance_type:
if len(args) == 1:
self.log.info("You provided an instance type and a cluster to "
"get the instance type from. Using the cluster "
"instance type.")
elif len(args) != 1:
self.parser.error(
'please provide an instance type (options: %s)' %
instance_types)
else:
instance_type = args[0]
self.log.info("Specified instance type: " + instance_type)
if instance_type not in static.INSTANCE_TYPES:
self.parser.error(
'invalid instance type. possible options: %s' %
instance_types)
if self.opts.classic and self.opts.vpc:
self.parser.error("options -c and -v cannot be specified at "
"the same time")
start = self.opts.start_time
end = self.opts.end_time
if self.opts.days_ago:
if self.opts.start_time:
self.parser.error("options -d and -s cannot be specified at "
"the same time")
if self.opts.end_time:
end_tup = utils.iso_to_datetime_tuple(self.opts.end_time)
else:
end_tup = utils.get_utc_now()
start = utils.datetime_tuple_to_iso(
end_tup - timedelta(days=self.opts.days_ago))
browser_cmd = self.cfg.globals.get("web_browser")
self.ec2.get_spot_history(instance_type, start, end,
                                  zone=zone, plot=self.opts.plot,
plot_web_browser=browser_cmd,
vpc=self.opts.vpc,
classic=self.opts.classic)
|
avadacatavra/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/formatters.py
|
20
|
import json
from mozlog.structured.formatters.base import BaseFormatter
class WptreportFormatter(BaseFormatter):
"""Formatter that produces results in the format that wpreport expects."""
def __init__(self):
self.raw_results = {}
def suite_end(self, data):
results = {}
results["results"] = []
for test_name in self.raw_results:
result = {"test": test_name}
result.update(self.raw_results[test_name])
results["results"].append(result)
return json.dumps(results)
def find_or_create_test(self, data):
test_name = data["test"]
if test_name not in self.raw_results:
self.raw_results[test_name] = {
"subtests": [],
"status": "",
"message": None
}
return self.raw_results[test_name]
def create_subtest(self, data):
test = self.find_or_create_test(data)
subtest_name = data["subtest"]
subtest = {
"name": subtest_name,
"status": "",
"message": None
}
test["subtests"].append(subtest)
return subtest
def test_status(self, data):
subtest = self.create_subtest(data)
subtest["status"] = data["status"]
if "message" in data:
subtest["message"] = data["message"]
def test_end(self, data):
test = self.find_or_create_test(data)
test["status"] = data["status"]
if "message" in data:
test["message"] = data["message"]
|
as110/as110.github.io
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/trac.py
|
364
|
# -*- coding: utf-8 -*-
"""
pygments.styles.trac
~~~~~~~~~~~~~~~~~~~~
Port of the default trac highlighter design.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class TracStyle(Style):
"""
Port of the default trac highlighter design.
"""
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #999988',
Comment.Preproc: 'bold noitalic #999999',
Comment.Special: 'bold #999999',
Operator: 'bold',
String: '#bb8844',
String.Regex: '#808000',
Number: '#009999',
Keyword: 'bold',
Keyword.Type: '#445588',
Name.Builtin: '#999999',
Name.Function: 'bold #990000',
Name.Class: 'bold #445588',
Name.Exception: 'bold #990000',
Name.Namespace: '#555555',
Name.Variable: '#008080',
Name.Constant: '#008080',
Name.Tag: '#000080',
Name.Attribute: '#008080',
Name.Entity: '#800080',
Generic.Heading: '#999999',
Generic.Subheading: '#aaaaaa',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
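# A minimal usage sketch (assumes pygments is installed; the lexer choice is
# arbitrary): render some Python source with this style.
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#     print(highlight("print('hi')", PythonLexer(),
#                     HtmlFormatter(style=TracStyle)))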
|
mattvonrocketstein/smash
|
refs/heads/master
|
smashlib/ipy3x/lib/deepreload.py
|
1
|
# -*- coding: utf-8 -*-
"""
Provides a reload() function that acts recursively.
Python's normal :func:`python:reload` function only reloads the module that it's
passed. The :func:`reload` function in this module also reloads everything
imported from that module, which is useful when you're changing files deep
inside a package.
To use this as your default reload function, type this for Python 2::
import __builtin__
from IPython.lib import deepreload
__builtin__.reload = deepreload.reload
Or this for Python 3::
import builtins
from IPython.lib import deepreload
builtins.reload = deepreload.reload
A reference to the original :func:`python:reload` is stored in this module as
:data:`original_reload`, so you can restore it later.
This code is almost entirely based on knee.py, which is a Python
re-implementation of hierarchical module import.
"""
from __future__ import print_function
#*****************************************************************************
# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from contextlib import contextmanager
import imp
import sys
from types import ModuleType
from warnings import warn
from IPython.utils.py3compat import builtin_mod, builtin_mod_name
original_import = builtin_mod.__import__
@contextmanager
def replace_import_hook(new_import):
saved_import = builtin_mod.__import__
builtin_mod.__import__ = new_import
try:
yield
finally:
builtin_mod.__import__ = saved_import
def get_parent(globals, level):
"""
parent, name = get_parent(globals, level)
Return the package that an import is being performed in. If globals comes
from the module foo.bar.bat (not itself a package), this returns the
sys.modules entry for foo.bar. If globals is from a package's __init__.py,
the package's entry in sys.modules is returned.
If globals doesn't come from a package or a module in a package, or a
corresponding entry is not found in sys.modules, None is returned.
"""
orig_level = level
if not level or not isinstance(globals, dict):
return None, ''
pkgname = globals.get('__package__', None)
if pkgname is not None:
# __package__ is set, so use it
if not hasattr(pkgname, 'rindex'):
raise ValueError('__package__ set to non-string')
if len(pkgname) == 0:
if level > 0:
raise ValueError('Attempted relative import in non-package')
return None, ''
name = pkgname
else:
# __package__ not set, so figure it out and set it
if '__name__' not in globals:
return None, ''
modname = globals['__name__']
if '__path__' in globals:
# __path__ is set, so modname is already the package name
globals['__package__'] = name = modname
else:
# Normal module, so work out the package name if any
lastdot = modname.rfind('.')
if lastdot < 0 and level > 0:
raise ValueError("Attempted relative import in non-package")
if lastdot < 0:
globals['__package__'] = None
return None, ''
globals['__package__'] = name = modname[:lastdot]
dot = len(name)
for x in range(level, 1, -1):
try:
dot = name.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
name = name[:dot]
try:
parent = sys.modules[name]
except:
if orig_level < 1:
warn("Parent module '%.200s' not found while handling absolute "
"import" % name)
parent = None
else:
raise SystemError("Parent module '%.200s' not loaded, cannot "
"perform relative import" % name)
# We expect, but can't guarantee, if parent != None, that:
# - parent.__name__ == name
# - parent.__dict__ is globals
# If this is violated... Who cares?
return parent, name
def load_next(mod, altmod, name, buf):
"""
mod, name, buf = load_next(mod, altmod, name, buf)
altmod is either None or same as mod
"""
if len(name) == 0:
# completely empty module name should only happen in
# 'from . import' (or '__import__("")')
return mod, None, buf
dot = name.find('.')
if dot == 0:
raise ValueError('Empty module name')
if dot < 0:
subname = name
next = None
else:
subname = name[:dot]
next = name[dot + 1:]
if buf != '':
buf += '.'
buf += subname
result = import_submodule(mod, subname, buf)
if result is None and mod != altmod:
result = import_submodule(altmod, subname, subname)
if result is not None:
buf = subname
if result is None:
raise ImportError("No module named %.200s" % name)
return result, next, buf
# Need to keep track of what we've already reloaded to prevent cyclic evil
found_now = {}
def import_submodule(mod, subname, fullname):
"""m = import_submodule(mod, subname, fullname)"""
# Require:
# if mod == None: subname == fullname
# else: mod.__name__ + "." + subname == fullname
global found_now
if fullname in found_now and fullname in sys.modules:
m = sys.modules[fullname]
else:
print('Reloading', fullname)
found_now[fullname] = 1
oldm = sys.modules.get(fullname, None)
if mod is None:
path = None
elif hasattr(mod, '__path__'):
path = mod.__path__
else:
return None
try:
# This appears to be necessary on Python 3, because imp.find_module()
# tries to import standard libraries (like io) itself, and we don't
# want them to be processed by our deep_import_hook.
with replace_import_hook(original_import):
fp, filename, stuff = imp.find_module(subname, path)
except ImportError:
return None
try:
m = imp.load_module(fullname, fp, filename, stuff)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
if oldm:
sys.modules[fullname] = oldm
raise
finally:
if fp:
fp.close()
add_submodule(mod, m, fullname, subname)
return m
def add_submodule(mod, submod, fullname, subname):
"""mod.{subname} = submod"""
if mod is None:
return # Nothing to do here.
if submod is None:
submod = sys.modules[fullname]
setattr(mod, subname, submod)
return
def ensure_fromlist(mod, fromlist, buf, recursive):
"""Handle 'from module import a, b, c' imports."""
if not hasattr(mod, '__path__'):
return
for item in fromlist:
if not hasattr(item, 'rindex'):
raise TypeError("Item in ``from list'' not a string")
if item == '*':
if recursive:
continue # avoid endless recursion
try:
all = mod.__all__
except AttributeError:
pass
else:
ret = ensure_fromlist(mod, all, buf, 1)
if not ret:
return 0
elif not hasattr(mod, item):
import_submodule(mod, item, buf + '.' + item)
def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
"""Replacement for __import__()"""
parent, buf = get_parent(globals, level)
head, name, buf = load_next(
parent, None if level < 0 else parent, name, buf)
tail = head
while name:
tail, name, buf = load_next(tail, tail, name, buf)
# If tail is None, both get_parent and load_next found
# an empty module name: someone called __import__("") or
# doctored faulty bytecode
if tail is None:
raise ValueError('Empty module name')
if not fromlist:
return head
ensure_fromlist(tail, fromlist, buf, 0)
return tail
modules_reloading = {}
def deep_reload_hook(m):
"""Replacement for reload()."""
if not isinstance(m, ModuleType):
raise TypeError("reload() argument must be module")
name = m.__name__
if name not in sys.modules:
raise ImportError("reload(): module %.200s not in sys.modules" % name)
global modules_reloading
try:
return modules_reloading[name]
except:
modules_reloading[name] = m
dot = name.rfind('.')
if dot < 0:
subname = name
path = None
else:
try:
parent = sys.modules[name[:dot]]
except KeyError:
modules_reloading.clear()
raise ImportError(
"reload(): parent %.200s not in sys.modules" % name[:dot])
subname = name[dot + 1:]
path = getattr(parent, "__path__", None)
try:
# This appears to be necessary on Python 3, because imp.find_module()
# tries to import standard libraries (like io) itself, and we don't
# want them to be processed by our deep_import_hook.
with replace_import_hook(original_import):
fp, filename, stuff = imp.find_module(subname, path)
finally:
modules_reloading.clear()
try:
newm = imp.load_module(name, fp, filename, stuff)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
sys.modules[name] = m
raise
finally:
if fp:
fp.close()
modules_reloading.clear()
return newm
# Save the original hooks
try:
original_reload = builtin_mod.reload
except AttributeError:
original_reload = imp.reload # Python 3
# Replacement for reload()
def reload(module, exclude=['sys', 'os.path', builtin_mod_name, '__main__']):
"""Recursively reload all modules used in the given module. Optionally
takes a list of modules to exclude from reloading. The default exclude
list contains sys, __main__, and __builtin__, to prevent, e.g., resetting
display, exception, and io hooks.
"""
global found_now
for i in exclude:
found_now[i] = 1
try:
with replace_import_hook(deep_import_hook):
return deep_reload_hook(module)
finally:
found_now = {}
# Uncomment the following to automatically activate deep reloading whenever
# this module is imported
#builtin_mod.reload = reload
|
noironetworks/heat
|
refs/heads/master
|
heat/tests/openstack/sahara/test_image.py
|
3
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import image
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
sahara_image_template = """
heat_template_version: 2015-10-15
resources:
sahara-image:
type: OS::Sahara::ImageRegistry
properties:
image: sahara-icehouse-vanilla-1.2.1-ubuntu-13.10
username: ubuntu
tags:
- vanilla
- 1.2.1
"""
class SaharaImageTest(common.HeatTestCase):
def setUp(self):
super(SaharaImageTest, self).setUp()
self.tmpl = template_format.parse(sahara_image_template)
self.stack = utils.parse_stack(self.tmpl)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['sahara-image']
self.client = mock.Mock()
self.patchobject(image.SaharaImageRegistry, 'client',
return_value=self.client)
self.patchobject(glance.GlanceClientPlugin,
'find_image_by_name_or_id',
return_value='12345')
def _create_resource(self, name, snippet, stack):
img = image.SaharaImageRegistry(name, snippet, stack)
scheduler.TaskRunner(img.create)()
return img
def test_create(self):
img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
args = ('12345', 'ubuntu', '')
self.client.images.update_image.assert_called_once_with(*args)
self.client.images.update_tags.assert_called_once_with(
'12345', ['vanilla', '1.2.1'])
self.assertEqual('12345', img.resource_id)
expected_state = (img.CREATE, img.COMPLETE)
self.assertEqual(expected_state, img.state)
def test_update(self):
img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
props = self.tmpl['resources']['sahara-image']['properties'].copy()
props['tags'] = []
props['description'] = 'test image'
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
scheduler.TaskRunner(img.update, self.rsrc_defn)()
tags_update_calls = [
mock.call('12345', ['vanilla', '1.2.1']),
mock.call('12345', [])
]
image_update_calls = [
mock.call('12345', 'ubuntu', ''),
mock.call('12345', 'ubuntu', 'test image')
]
self.client.images.update_image.assert_has_calls(image_update_calls)
self.client.images.update_tags.assert_has_calls(tags_update_calls)
self.assertEqual((img.UPDATE, img.COMPLETE), img.state)
def test_delete(self):
img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
scheduler.TaskRunner(img.delete)()
self.assertEqual((img.DELETE, img.COMPLETE), img.state)
self.client.images.unregister_image.assert_called_once_with(
img.resource_id)
def test_delete_not_found(self):
img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
self.client.images.unregister_image.side_effect = (
sahara.sahara_base.APIException(error_code=404))
scheduler.TaskRunner(img.delete)()
self.assertEqual((img.DELETE, img.COMPLETE), img.state)
self.client.images.unregister_image.assert_called_once_with(
img.resource_id)
def test_show_attribute(self):
img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'img': 'info'}
self.client.images.get.return_value = value
self.assertEqual({'img': 'info'}, img.FnGetAtt('show'))
|
elenagradovich/python_training
|
refs/heads/master
|
conftest.py
|
1
|
import pytest
from fixture.application import Application
import json
import os.path
fixture = None
target = None
@pytest.fixture
def app(request):
global fixture
global target
browser = request.config.getoption("--browser")
    # Path to the current config file
    if target is None:
        config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), request.config.getoption("--target"))
        # Read the configuration file
        with open(config_file) as f:  # f points to the opened config file
            target = json.load(f)
    # Re-create the fixture if it does not exist or has become invalid
    if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, base_url=target['baseUrl'])
fixture.session.ensure_login(username=target["username"], password=target["password"])
return fixture
@pytest.fixture(scope='session', autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json")
|
mavit/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py
|
55
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_scheduling_policy_facts
short_description: Retrieve facts about one or more oVirt scheduling policies
author: "Ondra Machacek (@machacekondra)"
version_added: "2.4"
description:
- "Retrieve facts about one or more oVirt scheduling policies."
notes:
- "This module creates a new top-level C(ovirt_scheduling_policies) fact,
which contains a list of scheduling policies."
options:
id:
description:
- "ID of the scheduling policy."
        required: false
name:
description:
- "Name of the scheduling policy, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all scheduling policies with name InClusterUpgrade:
- ovirt_scheduling_policy_facts:
name: InClusterUpgrade
- debug:
var: ovirt_scheduling_policies
'''
RETURN = '''
ovirt_scheduling_policies:
description: "List of dictionaries describing the scheduling policies.
Scheduling policies attributes are mapped to dictionary keys,
all scheduling policies attributes can be found at following
url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
id=dict(default=None),
name=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
system_service = connection.system_service()
sched_policies_service = system_service.scheduling_policies_service()
if module.params['name']:
sched_policies = [
e for e in sched_policies_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
elif module.params['id']:
sched_policies = [
sched_policies_service.service(module.params['id']).get()
]
else:
sched_policies = sched_policies_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_scheduling_policies=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in sched_policies
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
cpennington/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/tests/test_git_export.py
|
1
|
"""
Unittests for exporting to git via management command.
"""
import copy
import os
import shutil
from six import StringIO
import subprocess
import unittest
from uuid import uuid4
import six
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
import contentstore.git_export_utils as git_export_utils
from contentstore.git_export_utils import GitExportError
from contentstore.tests.utils import CourseTestCase
FEATURES_WITH_EXPORT_GIT = settings.FEATURES.copy()
FEATURES_WITH_EXPORT_GIT['ENABLE_EXPORT_GIT'] = True
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
@override_settings(FEATURES=FEATURES_WITH_EXPORT_GIT)
class TestGitExport(CourseTestCase):
"""
Excercise the git_export django management command with various inputs.
"""
def setUp(self):
"""
Create/reinitialize bare repo and folders needed
"""
super(TestGitExport, self).setUp()
if not os.path.isdir(git_export_utils.GIT_REPO_EXPORT_DIR):
os.mkdir(git_export_utils.GIT_REPO_EXPORT_DIR)
self.addCleanup(shutil.rmtree, git_export_utils.GIT_REPO_EXPORT_DIR)
self.bare_repo_dir = '{0}/data/test_bare.git'.format(
os.path.abspath(settings.TEST_ROOT))
if not os.path.isdir(self.bare_repo_dir):
os.mkdir(self.bare_repo_dir)
self.addCleanup(shutil.rmtree, self.bare_repo_dir)
subprocess.check_output(['git', '--bare', 'init'],
cwd=self.bare_repo_dir)
def test_command(self):
"""
Test that the command interface works. Ignore stderr for clean
test output.
"""
with self.assertRaisesRegex(CommandError, 'Error: unrecognized arguments:*'):
call_command('git_export', 'blah', 'blah', 'blah', stderr=StringIO())
if six.PY2:
with self.assertRaisesMessage(CommandError, 'Error: too few arguments'):
call_command('git_export', stderr=StringIO())
else:
with self.assertRaisesMessage(
CommandError,
'Error: the following arguments are required: course_loc, git_url'
):
call_command('git_export', stderr=StringIO())
# Send bad url to get course not exported
with self.assertRaisesRegex(CommandError, six.text_type(GitExportError.URL_BAD)):
call_command('git_export', 'foo/bar/baz', 'silly', stderr=StringIO())
# Send bad course_id to get course not exported
with self.assertRaisesRegex(CommandError, six.text_type(GitExportError.BAD_COURSE)):
call_command('git_export', 'foo/bar:baz', 'silly', stderr=StringIO())
def test_error_output(self):
"""
Verify that error output is actually resolved as the correct string
"""
with self.assertRaisesRegex(CommandError, six.text_type(GitExportError.BAD_COURSE)):
call_command(
'git_export', 'foo/bar:baz', 'silly'
)
with self.assertRaisesRegex(CommandError, six.text_type(GitExportError.URL_BAD)):
call_command(
'git_export', 'foo/bar/baz', 'silly'
)
def test_bad_git_url(self):
"""
Test several bad URLs for validation
"""
course_key = CourseLocator('org', 'course', 'run')
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.URL_BAD)):
git_export_utils.export_to_git(course_key, 'Sillyness')
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.URL_BAD)):
git_export_utils.export_to_git(course_key, 'example.com:edx/notreal')
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.URL_NO_AUTH)):
git_export_utils.export_to_git(course_key, 'http://blah')
def test_bad_git_repos(self):
"""
Test invalid git repos
"""
test_repo_path = '{}/test_repo'.format(git_export_utils.GIT_REPO_EXPORT_DIR)
self.assertFalse(os.path.isdir(test_repo_path))
course_key = CourseLocator('foo', 'blah', '100-')
# Test bad clones
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.CANNOT_PULL)):
git_export_utils.export_to_git(
course_key,
'https://user:blah@example.com/test_repo.git')
self.assertFalse(os.path.isdir(test_repo_path))
# Setup good repo with bad course to test xml export
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.XML_EXPORT_FAIL)):
git_export_utils.export_to_git(
course_key,
'file://{0}'.format(self.bare_repo_dir))
# Test bad git remote after successful clone
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.CANNOT_PULL)):
git_export_utils.export_to_git(
course_key,
'https://user:blah@example.com/r.git')
@unittest.skipIf(os.environ.get('GIT_CONFIG') or
os.environ.get('GIT_AUTHOR_EMAIL') or
os.environ.get('GIT_AUTHOR_NAME') or
os.environ.get('GIT_COMMITTER_EMAIL') or
os.environ.get('GIT_COMMITTER_NAME'),
'Global git override set')
def test_git_ident(self):
"""
Test valid course with and without user specified.
Test skipped if git global config override environment variable GIT_CONFIG
is set.
"""
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir),
'enigma'
)
expect_string = '{0}|{1}\n'.format(
git_export_utils.GIT_EXPORT_DEFAULT_IDENT['name'],
git_export_utils.GIT_EXPORT_DEFAULT_IDENT['email']
)
cwd = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR / 'test_bare')
git_log = subprocess.check_output(['git', 'log', '-1',
'--format=%an|%ae'], cwd=cwd).decode('utf-8')
self.assertEqual(expect_string, git_log)
# Make changes to course so there is something to commit
self.populate_course()
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir),
self.user.username
)
expect_string = '{0}|{1}\n'.format(
self.user.username,
self.user.email,
)
git_log = subprocess.check_output(
['git', 'log', '-1', '--format=%an|%ae'], cwd=cwd).decode('utf-8')
self.assertEqual(expect_string, git_log)
def test_no_change(self):
"""
Test response if there are no changes
"""
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir)
)
with self.assertRaisesRegex(GitExportError, six.text_type(GitExportError.CANNOT_COMMIT)):
git_export_utils.export_to_git(
self.course.id, 'file://{0}'.format(self.bare_repo_dir))
|
pipetree/pipetree
|
refs/heads/master
|
pipetree/__init__.py
|
1
|
from pipetree.stage import LocalFilePipelineStage,\
LocalDirectoryPipelineStage, ExecutorPipelineStage,\
ParameterPipelineStage, IdentityPipelineStage
__version__ = '0.1.0'
STAGES = {
"LocalDirectoryPipelineStage": LocalDirectoryPipelineStage,
"LocalFilePipelineStage": LocalFilePipelineStage,
"ExecutorPipelineStage": ExecutorPipelineStage,
"ParameterPipelineStage": ParameterPipelineStage,
"IdentityPipelineStage": IdentityPipelineStage
}
from pipetree.pipeline import Pipeline
from pipetree.artifact import Item
|
anki1909/peach
|
refs/heads/master
|
peach/nn/nnet.py
|
6
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/nnet.py
# Basic topologies of neural networks
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic topologies of neural networks.
This sub-package implements various neural network topologies; see the complete
list below. These topologies are implemented using the ``Layer`` class of the
``base`` sub-package. Please, consult the documentation of that module for more
information on layers of neurons. The neural nets implemented here don't derive
from the ``Layer`` class, instead, they have instance variables to take control
of them. Thus, there is no base class for networks. While subclassing the
classes of this module is usually safe, it is recommended that a new kind of
net be developed from the ground up.
"""
################################################################################
from numpy import array, sum, abs, reshape, sqrt, argmin, zeros, dot, exp
import random
from base import *
from af import *
from lrules import *
################################################################################
class FeedForward(list):
'''
Classic completely connected neural network.
A feedforward neural network is implemented as a list of layers, each layer
being a ``Layer`` object (please consult the documentation on the ``base``
module for more information on layers). The layers are completely connected,
    which means that every neuron in one layer is connected to every neuron in
    the following layer.
    There are a number of learning methods already implemented, but in
general, any learning class derived from ``FFLearning`` can be used. No
other kind of learning can be used. Please, consult the documentation on the
``lrules`` (*learning rules*) module.
'''
def __init__(self, layers, phi=Linear, lrule=BackPropagation, bias=False):
'''
Initializes a feedforward neural network.
A feedforward network is implemented as a list of layers, completely
connected.
:Parameters:
layers
A list of integers containing the shape of the network. The first
element of the list is the number of inputs of the network (or, as
            some prefer, the number of input neurons); the number of outputs
is the number of neurons in the last layer. Thus, at least two
numbers should be given.
phi
The activation functions to be used with each layer of the network.
Please consult the ``Layer`` documentation in the ``base`` module
for more information. This parameter can be a single function or a
list of functions. If only one function is given, then the same
function is used in every layer. If a list of functions is given,
then the layers use the functions in the sequence given. Note that
heterogeneous networks can be created that way. Defaults to
``Linear``.
lrule
The learning rule used. Only ``FFLearning`` objects (instances of
the class or of the subclasses) are allowed. Defaults to
``BackPropagation``. Check the ``lrules`` documentation for more
information.
bias
If ``True``, then the neurons are biased.
'''
list.__init__(self, [ ])
layers = list(layers)
for n, m in zip(layers[:-1], layers[1:]):
self.append(Layer((m, n), bias=bias))
self.phi = phi
self.__n = len(self)
self.__lrule = lrule
if isinstance(lrule, FFLearning):
self.__lrule = lrule
else:
try:
issubclass(lrule, FFLearning)
self.__lrule = lrule()
except TypeError:
                raise ValueError, 'incompatible learning rule'
def __getnlayers(self):
return self.__n
nlayers = property(__getnlayers, None)
'''Number of layers of the neural network. Not writable.'''
def __getbias(self):
r = [ ]
for l in self:
r.append(l.bias)
return tuple(r)
bias = property(__getbias, None)
'''A tuple containing the bias of each layer. Not writable.'''
def __gety(self):
return self[-1].y
y = property(__gety, None)
'''A list of activation values for each neuron in the last layer of the
network, ie., the answer of the network. This property is available only
after the network is fed some input.'''
def __getphi(self):
r = [ ]
for l in self:
r.append(l.phi)
return tuple(r)
def __setphi(self, phis):
try:
phis = tuple(phis)
for w, v in zip(self, phis):
w.phi = v
except TypeError:
for w in self:
w.phi = phis
phi = property(__getphi, __setphi)
'''Activation functions for every layer in the network. It is a list of
``Activation`` objects, but can be set with only one function. In this case,
the same function is used for every layer.'''
def __call__(self, x):
'''
The feedforward method of the network.
        The ``__call__`` interface should be called when the answer of the
        network to a given input vector ``x`` is desired. *This method has
        collateral effects*, so beware. After this method is called, the
        ``y`` property is set with the answer of the neurons.
:Parameters:
x
The input vector to the network.
:Returns:
The vector containing the answer of every neuron in the last layer, in
the respective order.
'''
for w in self:
x = w(x)
return self[-1].y
def learn(self, x, d):
'''
Applies one example of the training set to the network.
Using this method, one iteration of the learning procedure is made with
the neurons of this network. This method presents one example (not
necessarilly of a training set) and applies the learning rule over the
network. The learning rule is defined in the initialization of the
network, and some are implemented on the ``lrules`` method. New methods
can be created, consult the ``lrules`` documentation but, for
``FeedForward`` instances, only ``FFLearning`` learning is allowed.
Also, notice that *this method only applies the learning method!* The
network should be fed with the same input vector before trying to learn
anything first. Consult the ``feed`` and ``train`` methods below for
more ways to train a network.
:Parameters:
x
Input vector of the example. It should be a column vector of the
correct dimension, that is, the number of input neurons.
d
The desired answer of the network for this particular input vector.
Notice that the desired answer should have the same dimension of the
last layer of the network. This means that a desired answer should
be given for every output of the network.
:Returns:
The error obtained by the network.
'''
self.__lrule(self, x, d)
return sum(abs(d - self.y))
def feed(self, x, d):
'''
Feed the network and applies one example of the training set to the
network.
Using this method, one iteration of the learning procedure is made with
the neurons of this network. This method presents one example (not
        necessarily of a training set) and applies the learning rule over the
network. The learning rule is defined in the initialization of the
network, and some are implemented on the ``lrules`` method. New methods
can be created, consult the ``lrules`` documentation but, for
``FeedForward`` instances, only ``FFLearning`` learning is allowed.
Also, notice that *this method feeds the network* before applying the
learning rule. Feeding the network has collateral effects, and some
properties change when this happens. Namely, the ``y`` property is set.
Please consult the ``__call__`` interface.
:Parameters:
x
Input vector of the example. It should be a column vector of the
correct dimension, that is, the number of input neurons.
d
The desired answer of the network for this particular input vector.
Notice that the desired answer should have the same dimension of the
last layer of the network. This means that a desired answer should
be given for every output of the network.
:Returns:
The error obtained by the network.
'''
self(x)
return self.learn(x, d)
def train(self, train_set, imax=2000, emax=1e-5, randomize=False):
'''
Presents a training set to the network.
        This method automates the training of the network. Given a training
set, the examples are shown to the network (possibly in a randomized
way). A maximum number of iterations or a maximum admitted error should
be given as a stop condition.
:Parameters:
train_set
The training set is a list of examples. It can have any size and can
contain repeated examples. In fact, the definition of the training
set is open. Each element of the training set, however, should be a
two-tuple ``(x, d)``, where ``x`` is the input vector, and ``d`` is
the desired response of the network for this particular input. See
the ``learn`` and ``feed`` for more information.
imax
The maximum number of iterations. Examples from the training set
will be presented to the network while this limit is not reached.
Defaults to 2000.
emax
The maximum admitted error. Examples from the training set will be
presented to the network until the error obtained is lower than this
limit. Defaults to 1e-5.
randomize
If this is ``True``, then the examples are shown in a randomized
order. If ``False``, then the examples are shown in the same order
that they appear in the ``train_set`` list. Defaults to ``False``.
'''
i = 0
error = 1
s = len(train_set)
while i<imax and error>emax:
if randomize:
x, d = random.choice(train_set)
else:
x, d = train_set[i%s]
error = self.feed(x, d)
i = i+1
return error
################################################################################
class SOM(Layer):
'''
A Self-Organizing Map (SOM).
A self-organizing map is a type of neural network that is trained via
unsupervised learning. In particular, the self-organizing map finds the
neuron closest to an input vector -- this neuron is the winning neuron, and
it is the answer of the network. Thus, the SOM is usually used for
classification and pattern recognition.
The SOM is a single-layer network, so this class subclasses the ``Layer``
class. But some of the properties of a ``Layer`` object are not available or
make no sense in this context.
'''
def __init__(self, shape, lrule=Competitive):
'''
Initializes a self-organizing map.
A self-organizing map is implemented as a layer of neurons. There is no
        connection among the neurons. The answer to a given input is the
        neuron closest to the given input. ``phi`` (the activation function),
        ``v`` (the activation potential) and ``bias`` are not used.
:Parameters:
shape
            Establishes the size of the SOM. It must be a two-tuple of the
format ``(m, n)``, where ``m`` is the number of neurons in the
layer, and ``n`` is the number of inputs of each neuron. The neurons
in the layer all have the same number of inputs.
lrule
The learning rule used. Only ``SOMLearning`` objects (instances of
the class or of the subclasses) are allowed. Defaults to
``Competitive``. Check the ``lrules`` documentation for more
information.
'''
Layer.__init__(self, shape, phi=None, bias=False)
self.__lrule = lrule
self.__y = None
self.__phi = None
if isinstance(lrule, SOMLearning):
self.__lrule = lrule
else:
try:
issubclass(lrule, SOMLearning)
self.__lrule = lrule()
except TypeError:
                raise ValueError, 'incompatible learning rule'
def __gety(self):
if self.__y is None:
raise ValueError, "activation unavailable"
else:
return self.__y
y = property(__gety, None)
'''The winning neuron for a given input, the answer of the network. This
property is available only after the network is fed some input.'''
def __call__(self, x):
'''
The response of the network to a given input.
        The ``__call__`` interface should be called when the answer of the
        network to a given input vector ``x`` is desired. *This method has
        collateral effects*, so beware. After this method is called, the
        ``y`` property is set with the winning neuron.
:Parameters:
x
The input vector to the network.
:Returns:
The winning neuron.
'''
x = reshape(x, (1, self.inputs))
dist = sqrt(sum((x - self.weights)**2, axis=1))
self.__y = argmin(dist)
return self.y
def learn(self, x):
'''
Applies one example of the training set to the network.
Using this method, one iteration of the learning procedure is made with
the neurons of this network. This method presents one example (not
        necessarily of a training set) and applies the learning rule over the
network. The learning rule is defined in the initialization of the
network, and some are implemented on the ``lrules`` method. New methods
can be created, consult the ``lrules`` documentation but, for
``SOM`` instances, only ``SOMLearning`` learning is allowed.
Also, notice that *this method only applies the learning method!* The
network should be fed with the same input vector before trying to learn
anything first. Consult the ``feed`` and ``train`` methods below for
more ways to train a network.
:Parameters:
x
Input vector of the example. It should be a column vector of the
correct dimension, that is, the number of input neurons.
:Returns:
The error obtained by the network.
'''
self.__lrule(self, x)
return sum(abs(x - self.y))
def feed(self, x):
'''
Feed the network and applies one example of the training set to the
network.
Using this method, one iteration of the learning procedure is made with
the neurons of this network. This method presents one example (not
        necessarily of a training set) and applies the learning rule over the
network. The learning rule is defined in the initialization of the
network, and some are implemented on the ``lrules`` method. New methods
can be created, consult the ``lrules`` documentation but, for
``SOM`` instances, only ``SOMLearning`` learning is allowed.
Also, notice that *this method feeds the network* before applying the
learning rule. Feeding the network has collateral effects, and some
properties change when this happens. Namely, the ``y`` property is set.
Please consult the ``__call__`` interface.
:Parameters:
x
Input vector of the example. It should be a column vector of the
correct dimension, that is, the number of input neurons.
:Returns:
The error obtained by the network.
'''
self(x)
return self.learn(x)
def train(self, train_set, imax=2000, emax=1e-5, randomize=False):
'''
Presents a training set to the network.
        This method automates the training of the network. Given a training
set, the examples are shown to the network (possibly in a randomized
way). A maximum number of iterations or a maximum admitted error should
be given as a stop condition.
:Parameters:
train_set
The training set is a list of examples. It can have any size and can
contain repeated examples. In fact, the definition of the training
            set is open. Each element of the training set, however, should be
            an input vector of the correct dimensions. See the ``learn`` and
            ``feed`` methods for more information.
imax
The maximum number of iterations. Examples from the training set
will be presented to the network while this limit is not reached.
Defaults to 2000.
emax
The maximum admitted error. Examples from the training set will be
presented to the network until the error obtained is lower than this
limit. Defaults to 1e-5.
randomize
If this is ``True``, then the examples are shown in a randomized
order. If ``False``, then the examples are shown in the same order
that they appear in the ``train_set`` list. Defaults to ``False``.
'''
i = 0
error = 1
s = len(train_set)
while i<imax and error>emax:
if randomize:
x = random.choice(train_set)
else:
x = train_set[i%s]
error = self.feed(x)
i = i+1
return error
################################################################################
class GRNN(object):
"""
GRNN is the implementation of General Regression Neural Network, a kind of
probabilistic neural network used in regression tasks.
"""
def __init__(self, sigma=0.1):
"""
Initializes the network.
        It is not necessary to inform the training set size; GRNN will
        determine it by itself in the ``train`` method.
:Parameters:
sigma
            A real number. This value determines the spread of the probability
            density function (i.e. it is the smoothness parameter). A large
            value for sigma results in a wide Gaussian, so the sample points
            cover a wide range of inputs, while a small value creates a narrow
            Gaussian, so the sample points cover only a small range of inputs.
"""
self._samples = None
self._targets = None
self.sigma = sigma
def _kernel(self, x1, x2):
"""
This method gives a measure of how well a training sample can represent
the position of prediction (i.e. how well x1 can represent x2, or vice
        versa). If the distance D between x1 and x2 is small, the result is
        large. For distance 0 (i.e. x1 == x2), the result is one and the
        sample point is the best representation of the prediction point.
In the probabilistic view, this method calculates the probability
distribution.
"""
D = x1-x2
return exp(-dot(D, D)/(2*self.sigma**2))
def train(self, sampleInputs, targets):
"""
Presents a training set to the network.
This method uses the sample inputs to set the size of network.
:Parameters:
sampleInputs
Should be a list of numbers or a list of ``numpy.array`` to set the
sample inputs. These inputs are used to calculate the distance
between prediction points.
targets
The target values of sample inputs. Should be a list of numbers.
"""
self._samples = array(sampleInputs)
self._targets = array(targets)
def __call__(self, x):
"""
The method to predict a value from input ``x``.
:Parameters:
x
The input vector to the network.
:Returns:
The predicted value.
"""
values = [self._kernel(x, x2) for x2 in self._samples]
regular = sum(values)
return dot(values, self._targets)/regular
class PNN(object):
"""
PNN is the implementation of Probabilistic Neural Network, a network used
    for classification tasks.
"""
def __init__(self, sigma=0.1):
"""
Initializes the network.
        It is not necessary to inform the training set size; PNN will
        determine it by itself in the ``train`` method.
:Parameters:
sigma
            A real number. This value determines the spread of the probability
            density function (i.e. it is the smoothness parameter). A large
            value for sigma results in a wide Gaussian, so the sample points
            cover a wide range of inputs, while a small value creates a narrow
            Gaussian, so the sample points cover only a small range of inputs.
"""
self.sigma = sigma
self._categorys = None
def _kernel(self, x1, x2):
"""
This method gives a measure of how well a training sample can represent
the position of evaluation (i.e. how well x1 can represent x2, or vice
        versa). If the distance D between x1 and x2 is small, the result is
        large. For distance 0 (i.e. x1 == x2), the result is one and the
        sample point is the best representation of the evaluation point.
In the probabilistic view, this method calculates the probability
distribution.
"""
D = x1-x2
return exp(-dot(D, D)/(2*self.sigma**2))
def train(self, trainSet):
"""
Presents a training set to the network.
This method uses the sample inputs to set the size of network.
:Parameters:
          trainSet
The training set is a list of examples. It can have any size. In
fact, the definition of the training set is open. Each element of
the training set, however, should be a two-tuple ``(x, d)``, where
``x`` is the input vector, and ``d`` is the desired response of the
network for this particular input, i.e the category of ``x``
pattern.
"""
self._categorys = {}
for pattern, category in trainSet:
if category not in self._categorys:
self._categorys[category] = []
self._categorys[category].append(array(pattern))
def __call__(self, x):
"""
The method to classify the input ``x`` into one of trained category.
:Parameters:
x
The input vector to the network.
:Returns:
The category that best represent the input vector.
"""
sums = {}
for category in self._categorys:
patterns = self._categorys[category]
sums[category] = sum([self._kernel(x, x2) for x2 in patterns])
sums[category] /= float(len(patterns))
return max(sums, key=lambda x:sums[x])
################################################################################
# Test
if __name__ == "__main__":
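    # Editor's sketch: a quick smoke test on hypothetical data. It assumes the
    # star import from ``af`` above provides a Sigmoid activation; substitute
    # any other Activation subclass if the name differs in your version.
    xor_set = [(array([0., 0.]), array([0.])),
               (array([0., 1.]), array([1.])),
               (array([1., 0.]), array([1.])),
               (array([1., 1.]), array([0.]))]
    net = FeedForward((2, 2, 1), phi=Sigmoid, lrule=BackPropagation, bias=True)
    print "XOR training error:", net.train(xor_set, imax=5000, randomize=True)
    # GRNN regression on a toy one-dimensional sample set:
    grnn = GRNN(sigma=0.5)
    grnn.train([array([0.]), array([1.]), array([2.])], [0., 1., 4.])
    print "GRNN prediction at 1.5:", grnn(array([1.5]))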
pass
|
diagramsoftware/odoo
|
refs/heads/8.0
|
addons/website_sale/models/product.py
|
262
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import osv, fields
class product_style(osv.Model):
_name = "product.style"
_columns = {
'name' : fields.char('Style Name', required=True),
'html_class': fields.char('HTML Classes'),
}
class product_pricelist(osv.Model):
_inherit = "product.pricelist"
_columns = {
'code': fields.char('Promotional Code'),
}
class product_public_category(osv.osv):
_name = "product.public.category"
_description = "Public Category"
_order = "sequence, name"
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
res = []
for cat in self.browse(cr, uid, ids, context=context):
names = [cat.name]
pcat = cat.parent_id
while pcat:
names.append(pcat.name)
pcat = pcat.parent_id
res.append((cat.id, ' / '.join(reversed(names))))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.public.category','Parent Category', select=True),
'child_id': fields.one2many('product.public.category', 'parent_id', string='Children Categories'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),
# NOTE: there is no 'default image', because by default we don't show thumbnails for categories. However if we have a thumbnail
# for at least one category, then we display a default image on the other, so that the buttons have consistent styling.
# In this case, the default image is set by the js code.
# NOTE2: image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the category, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.public.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the category. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Smal-sized image", type="binary", multi="_get_image",
store={
'product.public.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the category. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
class product_template(osv.Model):
_inherit = ["product.template", "website.seo.metadata"]
_order = 'website_published desc, website_sequence desc, name'
_name = 'product.template'
_mail_post_access = 'read'
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.id,)
return res
_columns = {
# TODO FIXME tde: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Comments',
),
'website_published': fields.boolean('Available in the website', copy=False),
'website_description': fields.html('Description for the website', translate=True),
'alternative_product_ids': fields.many2many('product.template','product_alternative_rel','src_id','dest_id', string='Alternative Products', help='Appear on the product page'),
'accessory_product_ids': fields.many2many('product.product','product_accessory_rel','src_id','dest_id', string='Accessory Products', help='Appear on the shopping cart'),
'website_size_x': fields.integer('Size X'),
'website_size_y': fields.integer('Size Y'),
'website_style_ids': fields.many2many('product.style', string='Styles'),
'website_sequence': fields.integer('Sequence', help="Determine the display order in the Website E-commerce"),
'website_url': fields.function(_website_url, string="Website url", type="char"),
'public_categ_ids': fields.many2many('product.public.category', string='Public Category', help="Those categories are used to group similar products for e-commerce."),
}
def _defaults_website_sequence(self, cr, uid, *l, **kwargs):
cr.execute('SELECT MAX(website_sequence)+1 FROM product_template')
next_sequence = cr.fetchone()[0] or 0
return next_sequence
_defaults = {
'website_size_x': 1,
'website_size_y': 1,
'website_sequence': _defaults_website_sequence,
'website_published': False,
}
def set_sequence_top(self, cr, uid, ids, context=None):
cr.execute('SELECT MAX(website_sequence) FROM product_template')
max_sequence = cr.fetchone()[0] or 0
return self.write(cr, uid, ids, {'website_sequence': max_sequence + 1}, context=context)
def set_sequence_bottom(self, cr, uid, ids, context=None):
cr.execute('SELECT MIN(website_sequence) FROM product_template')
min_sequence = cr.fetchone()[0] or 0
return self.write(cr, uid, ids, {'website_sequence': min_sequence -1}, context=context)
def set_sequence_up(self, cr, uid, ids, context=None):
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence > %s AND website_published = %s ORDER BY website_sequence ASC LIMIT 1""" % (product.website_sequence, product.website_published))
prev = cr.fetchone()
if prev:
self.write(cr, uid, [prev[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': prev[1]}, context=context)
else:
return self.set_sequence_top(cr, uid, ids, context=context)
def set_sequence_down(self, cr, uid, ids, context=None):
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence < %s AND website_published = %s ORDER BY website_sequence DESC LIMIT 1""" % (product.website_sequence, product.website_published))
next = cr.fetchone()
if next:
self.write(cr, uid, [next[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': next[1]}, context=context)
else:
return self.set_sequence_bottom(cr, uid, ids, context=context)
class product_product(osv.Model):
_inherit = "product.product"
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.product_tmpl_id.id,)
return res
_columns = {
'website_url': fields.function(_website_url, string="Website url", type="char"),
}
class product_attribute(osv.Model):
_inherit = "product.attribute"
_columns = {
'type': fields.selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color'), ('hidden', 'Hidden')], string="Type"),
}
_defaults = {
'type': lambda *a: 'radio',
}
class product_attribute_value(osv.Model):
_inherit = "product.attribute.value"
_columns = {
'color': fields.char("HTML Color Index", help="Here you can set a specific HTML color index (e.g. #ff0000) to display the color on the website if the attibute type is 'Color'."),
}
|
udapi/udapi-python
|
refs/heads/master
|
udapi/block/write/tikz.py
|
1
|
"""Tikz class is a writer for LaTeX with tikz-dependency."""
import logging
from udapi.core.basewriter import BaseWriter
class Tikz(BaseWriter):
r"""A writer of files in the LaTeX with tikz-dependency format.
Usage::
udapy write.Tikz < my.conllu > my.tex
# or for 2D tree-like rendering
udapy write.Tikz as_tree=1 < my.conllu > my.tex
pdflatex my.tex
xdg-open my.pdf
Long sentences may result in too large pictures.
You can tune the width (in addition to changing fontsize or using minipage and rescaling) with
``\begin{deptext}[column sep=0.2cm]``
or individually for each word:
``My \&[.5cm] dog \& etc.``
By default, the height of the horizontal segment of a dependency edge is proportional
to the distance between the linked words. You can tune the height with:
``\depedge[edge unit distance=1.5ex]{9}{1}{deprel}``
See `tikz-dependency documentation
<http://mirrors.ctan.org/graphics/pgf/contrib/tikz-dependency/tikz-dependency-doc.pdf>`_
for details.
With ``as_tree=1``, there are two options how to visualize deprels:
either as labels positioned on the edges by uncommenting the relevant style definition,
or by adding ``deprel`` to the list of attributes, so deprels are above/below the words.
The latter is the default because the edge labels need manual tweaks to prevent overlapping.
Alternatives:
* use `write.TextModeTrees` and include it in verbatim environment in LaTeX.
* use `write.Html`, press "Save as SVG" button, convert to pdf and include in LaTeX.
"""
def __init__(self, print_sent_id=True, print_text=True, print_preambule=True,
attributes=None, as_tree=False, comment_attribute=None, **kwargs):
"""Create the Tikz block object.
Args:
print_sent_id: print sent_id (`tree.address()`) as a LaTeX comment (default=True)
print_text: print sentence text (`tree.get_sentence()`) as a LaTeX comment (default=True)
print_preambule: surround each document with LaTeX preambule (`documentclass` etc)
and `end{document}` (default=True)
attributes: comma-separated list of node attributes to print (each on a separate line).
as_tree: boolean - should print it as a 2D tree?
comment_attribute: which attribute to print as a string under each graph (e.g. text_en)
"""
super().__init__(**kwargs)
self.print_sent_id = print_sent_id
self.print_text = print_text
self.print_preambule = print_preambule
if attributes is not None:
self.node_attributes = attributes.split(',')
elif as_tree:
self.node_attributes = 'form,upos,deprel'.split(',')
else:
self.node_attributes = 'form,upos'.split(',')
self.as_tree = as_tree
self.comment_attribute = comment_attribute
def before_process_document(self, doc):
super().before_process_document(doc)
if self.print_preambule:
print(r'\documentclass[multi=dependency]{standalone}')
print(r'\usepackage[T1]{fontenc}')
print(r'\usepackage[utf8]{inputenc}')
print(r'\usepackage{tikz-dependency}')
if self.as_tree:
print(r'\tikzset{depedge/.style = {blue,thick}, %,<-')
print(r' deplabel/.style = {opacity=0, %black, fill opacity=0.9, text opacity=1,')
print(r' % yshift=4pt, pos=0.1, inner sep=0, fill=white, font={\scriptsize}')
print(r' },')
print(r' depnode/.style = {draw,circle,fill,blue,inner sep=1.5pt},')
print(r' depguide/.style = {dashed,gray},')
print(r'}')
print(r'\newlength{\deplevel}\setlength{\deplevel}{8mm}')
print(r'\newlength{\depskip}\setlength{\depskip}{4mm}')
print(r'\newcommand{\deptrans}[1]{\node (t) at (\matrixref.south)[yshift=-1mm]'
" {``#1''};}")
print(r'\begin{document}')
def after_process_document(self, doc):
if self.print_preambule:
print(r'\end{document}')
logging.info('Use pdflatex to compile the output')
super().after_process_document(doc)
def _tex_escape(self, string):
        return string.replace('_', r'\_').replace('$', r'\$').replace('[', '$[$').replace(']', '$]$')
def process_tree(self, tree):
print(r'\begin{dependency}')
print(r'\begin{deptext}')
nodes = tree.descendants
if self.print_sent_id:
print('% sent_id = ' + tree.address())
if self.print_text:
print("% text = " + tree.get_sentence())
comment = tree.comment
if comment:
comment = comment.rstrip()
print('%' + comment.replace('\n', '\n%'))
lines = ['' for _ in self.node_attributes]
for node in nodes:
values = [self._tex_escape(v) for v in node.get_attrs(self.node_attributes)]
max_len = max(len(value) for value in values)
for index, value in enumerate(values):
if node.ord > 1:
lines[index] += r' \& '
lines[index] += value.ljust(max_len)
for line in lines:
print(line + r' \\')
print(r'\end{deptext}')
if self.as_tree:
depths = [n._get_attr('depth') for n in nodes]
max_depth = max(depths)
for node in nodes:
print(r'\node (w%d) [yshift=\depskip+%s\deplevel,depnode] at (\wordref{1}{%d}) {};'
% (node.ord, max_depth - depths[node.ord - 1], node.ord))
for node in nodes:
print(r'\draw[depguide] (w%d)--(\wordref{1}{%d});' % (node.ord, node.ord), end='')
if node.parent.is_root():
print('')
else:
print(r' \draw[depedge] (w%d)--node[deplabel] {%s} (w%d);'
% (node.ord, node.deprel, node.parent.ord))
else:
for node in nodes:
if node.parent.is_root():
print(r'\deproot{%d}{root}' % node.ord)
else:
print(r'\depedge{%d}{%d}{%s}' % (node.parent.ord, node.ord, node.deprel))
if self.comment_attribute and tree.comment:
start_pos = tree.comment.find(self.comment_attribute + ' = ')
if start_pos != -1:
start_pos += len(self.comment_attribute) + 3
end_pos = tree.comment.find('\n', start_pos)
                print(r'\deptrans{' + tree.comment[start_pos:end_pos] + '}')
print(r'\end{dependency}')
print('') # empty line marks a new paragraph in LaTeX, but multi=dependency causes newpage
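# Editor's sketch of what process_tree() prints for a two-word sentence with
# the default attributes form and upos (illustrative only; the actual deprel
# label and the % comment lines depend on the input tree):
#   \begin{dependency}
#   \begin{deptext}
#   Hello \& world \\
#   INTJ  \& NOUN  \\
#   \end{deptext}
#   \deproot{1}{root}
#   \depedge{1}{2}{discourse}
#   \end{dependency}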
|
40223101/w16b_test
|
refs/heads/master
|
2015cd_midterm-master/static/Brython3.1.0-20150301-090019/Lib/textwrap.py
|
745
|
"""Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
import re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
                chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
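# Illustrative examples for dedent() (added here; not from the original):
#     dedent("    hello\n    world")  ->  "hello\nworld"
#     dedent("  hello\n\thello")      ->  "  hello\n\thello"   (tab vs. space:
#                                          no common margin, text unchanged)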
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
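# Illustrative example for indent() (added): by default only non-blank lines
# receive the prefix.
#     indent("a\n\nb\n", "> ")  ->  "> a\n\n> b\n"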
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
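    # Illustrative demo of the public wrap()/fill() helpers; the sample text
    # and width below are arbitrary additions, not part of the original module.
    sample = "The quick brown fox jumps over the lazy dog. " * 3
    print(wrap(sample, width=30))
    print(fill(sample, width=30))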
|
rdev-hackaton/msze_www
|
refs/heads/master
|
msze_www/settings/__init__.py
|
1
|
# -*- coding: utf-8 -*-
DEBUG = False
MOBILE_APP_STORE_URL = (
'https://play.google.com/store/apps/'
'details?id=com.ionicframework.msze322845'
)
DATAFILE_URL_PATH = '/api/datafile.json'
DATAFILE_FILEPATH = 'data/datafile.json'
CACHE_TIME = 24 * 60 * 60 # 24h
|
luwei0917/awsemmd_script
|
refs/heads/master
|
small_script/cross_q.py
|
1
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
# compute cross Q for every pdb pair in one folder
parser = argparse.ArgumentParser(description="Compute cross q")
parser.add_argument("-m", "--mode",
type=int, default=5)
args = parser.parse_args()
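# Mode summary (inferred from the branches below; added for readability):
#   5: a fixed list of frame names; 1 and 4: every *.pdb file in the cwd;
#   2 and 3: files named 0.pdb .. 19.pdb. Every mode runs
#   CalcQValueFrom2Pdb.py on each ordered pair and writes the Q values out.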
if args.mode == 5:
# files = [32, 41, 47, 48, 57, 58, 6, 63, 69, 72, 82, 96]
files = [0,1,10,11,12,13,14,15,16,17,18,19,2]
n = len(files)
cross_q = open('matrix', 'w')
with open("cross_q", "w") as f:
for name_i in files:
print(name_i)
for name_j in files:
script = "python2 ~/opt/small_script/CalcQValueFrom2Pdb.py {} {}".format(name_i, name_j)
result = subprocess.check_output(script, shell=True).decode("utf-8")
# print(result.strip('\n'))
# f.write(result.strip('\n'))
i = name_i
j = name_j
f.write("{}, {}, {}".format(i, j, result))
cross_q.write(result)
if args.mode == 1:
files = glob.glob("*.pdb")
n = len(files)
cross_q = open('matrix', 'w')
with open("cross_q", "w") as f:
for name_i in files:
print(name_i)
for name_j in files:
script = "python2 ~/opt/small_script/CalcQValueFrom2Pdb.py {} {}".format(name_i, name_j)
result = subprocess.check_output(script, shell=True).decode("utf-8")
# print(result.strip('\n'))
# f.write(result.strip('\n'))
                i = re.findall(r'\d+', name_i)[0]
                j = re.findall(r'\d+', name_j)[0]
f.write("{}, {}, {}".format(i, j, result))
cross_q.write(result)
if args.mode == 2:
cross_q = open('cross_q', 'w')
n = 20
for i in range(n):
print(i)
for j in range(n):
script = "python2 ~/opt/small_script/CalcQValueFrom2Pdb.py {}.pdb {}.pdb".format(i, j)
# x = subprocess.check_output()
result = subprocess.check_output(script, shell=True).decode("utf-8")
# print(result.strip('\n'))
cross_q.write(result.strip('\n'))
cross_q.write(" ")
cross_q.write("\n")
if args.mode == 3:
cross_q = open('matrix', 'w')
n = 20
for i in range(n):
print(i)
for j in range(n):
script = "python2 ~/opt/small_script/CalcQValueFrom2Pdb.py {}.pdb {}.pdb".format(i, j)
# x = subprocess.check_output()
result = subprocess.check_output(script, shell=True).decode("utf-8")
# print(result.strip('\n'))
cross_q.write(result)
# cross_q.write(" ")
# cross_q.write("\n")
if args.mode == 4:
files = glob.glob("*.pdb")
n = len(files)
with open("matrix", "w") as f:
for name_i in files:
print(name_i)
for name_j in files:
script = "python2 ~/opt/small_script/CalcQValueFrom2Pdb.py {} {}".format(name_i, name_j)
result = subprocess.check_output(script, shell=True).decode("utf-8")
# print(result.strip('\n'))
# f.write(result.strip('\n'))
                i = re.findall(r'\d+', name_i)[0]
                j = re.findall(r'\d+', name_j)[0]
# f.write("{}, {}, {}".format(i, j, result))
f.write(result)
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/hair/human/shared_hair_human_female_s17.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hair/human/shared_hair_human_female_s17.iff"
result.attribute_template_id = -1
result.stfName("hair_name","hair")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
2014c2g5/cd0505
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/multiprocessing/util.py
|
696
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading        # we want threading to install its
                        # cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
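# Illustrative usage sketch for Finalize (added; names are examples only,
# mirroring the get_temp_dir() call above):
#     f = Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
#     f.still_active()   # True until the callback runs or is cancelled
#     f()                # runs shutil.rmtree(tempdir) at most once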
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
|
littlstar/chromium.src
|
refs/heads/nw
|
chrome/browser/sync/PRESUBMIT.py
|
32
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/sync.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium.linux': {
'linux_chromium_rel_swarming': set(['defaulttests']),
},
'tryserver.chromium.mac': {
'mac_chromium_rel_swarming': set(['defaulttests']),
},
'tryserver.chromium.win': {
'win_chromium_rel_swarming': set(['defaulttests']),
}
}
|
esi-mineset/spark
|
refs/heads/master
|
examples/src/main/python/sql/datasource.py
|
20
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Spark SQL data sources.
Run with:
./bin/spark-submit examples/src/main/python/sql/datasource.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on:schema_merging$
from pyspark.sql import Row
# $example off:schema_merging$
def basic_datasource_example(spark):
# $example on:generic_load_save_functions$
df = spark.read.load("examples/src/main/resources/users.parquet")
df.select("name", "favorite_color").write.save("namesAndFavColors.parquet")
# $example off:generic_load_save_functions$
# $example on:write_partitioning$
df.write.partitionBy("favorite_color").format("parquet").save("namesPartByColor.parquet")
# $example off:write_partitioning$
# $example on:write_partition_and_bucket$
df = spark.read.parquet("examples/src/main/resources/users.parquet")
(df
.write
.partitionBy("favorite_color")
.bucketBy(42, "name")
.saveAsTable("people_partitioned_bucketed"))
# $example off:write_partition_and_bucket$
# $example on:manual_load_options$
df = spark.read.load("examples/src/main/resources/people.json", format="json")
df.select("name", "age").write.save("namesAndAges.parquet", format="parquet")
# $example off:manual_load_options$
# $example on:manual_load_options_csv$
df = spark.read.load("examples/src/main/resources/people.csv",
format="csv", sep=":", inferSchema="true", header="true")
# $example off:manual_load_options_csv$
# $example on:write_sorting_and_bucketing$
df.write.bucketBy(42, "name").sortBy("age").saveAsTable("people_bucketed")
# $example off:write_sorting_and_bucketing$
# $example on:direct_sql$
df = spark.sql("SELECT * FROM parquet.`examples/src/main/resources/users.parquet`")
# $example off:direct_sql$
spark.sql("DROP TABLE IF EXISTS people_bucketed")
spark.sql("DROP TABLE IF EXISTS people_partitioned_bucketed")
def parquet_example(spark):
# $example on:basic_parquet_example$
peopleDF = spark.read.json("examples/src/main/resources/people.json")
# DataFrames can be saved as Parquet files, maintaining the schema information.
peopleDF.write.parquet("people.parquet")
# Read in the Parquet file created above.
# Parquet files are self-describing so the schema is preserved.
# The result of loading a parquet file is also a DataFrame.
parquetFile = spark.read.parquet("people.parquet")
# Parquet files can also be used to create a temporary view and then used in SQL statements.
parquetFile.createOrReplaceTempView("parquetFile")
teenagers = spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
teenagers.show()
# +------+
# | name|
# +------+
# |Justin|
# +------+
# $example off:basic_parquet_example$
def parquet_schema_merging_example(spark):
# $example on:schema_merging$
# spark is from the previous example.
# Create a simple DataFrame, stored into a partition directory
sc = spark.sparkContext
squaresDF = spark.createDataFrame(sc.parallelize(range(1, 6))
.map(lambda i: Row(single=i, double=i ** 2)))
squaresDF.write.parquet("data/test_table/key=1")
# Create another DataFrame in a new partition directory,
# adding a new column and dropping an existing column
cubesDF = spark.createDataFrame(sc.parallelize(range(6, 11))
.map(lambda i: Row(single=i, triple=i ** 3)))
cubesDF.write.parquet("data/test_table/key=2")
# Read the partitioned table
mergedDF = spark.read.option("mergeSchema", "true").parquet("data/test_table")
mergedDF.printSchema()
# The final schema consists of all 3 columns in the Parquet files together
    # with the partitioning column that appears in the partition directory paths.
# root
# |-- double: long (nullable = true)
# |-- single: long (nullable = true)
# |-- triple: long (nullable = true)
# |-- key: integer (nullable = true)
# $example off:schema_merging$
def json_dataset_example(spark):
# $example on:json_dataset$
# spark is from the previous example.
sc = spark.sparkContext
# A JSON dataset is pointed to by path.
# The path can be either a single text file or a directory storing text files
path = "examples/src/main/resources/people.json"
peopleDF = spark.read.json(path)
# The inferred schema can be visualized using the printSchema() method
peopleDF.printSchema()
# root
# |-- age: long (nullable = true)
# |-- name: string (nullable = true)
# Creates a temporary view using the DataFrame
peopleDF.createOrReplaceTempView("people")
# SQL statements can be run by using the sql methods provided by spark
teenagerNamesDF = spark.sql("SELECT name FROM people WHERE age BETWEEN 13 AND 19")
teenagerNamesDF.show()
# +------+
# | name|
# +------+
# |Justin|
# +------+
# Alternatively, a DataFrame can be created for a JSON dataset represented by
# an RDD[String] storing one JSON object per string
jsonStrings = ['{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}']
otherPeopleRDD = sc.parallelize(jsonStrings)
otherPeople = spark.read.json(otherPeopleRDD)
otherPeople.show()
# +---------------+----+
# | address|name|
# +---------------+----+
# |[Columbus,Ohio]| Yin|
# +---------------+----+
# $example off:json_dataset$
def jdbc_dataset_example(spark):
# $example on:jdbc_dataset$
# Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods
# Loading data from a JDBC source
jdbcDF = spark.read \
.format("jdbc") \
.option("url", "jdbc:postgresql:dbserver") \
.option("dbtable", "schema.tablename") \
.option("user", "username") \
.option("password", "password") \
.load()
jdbcDF2 = spark.read \
.jdbc("jdbc:postgresql:dbserver", "schema.tablename",
properties={"user": "username", "password": "password"})
# Specifying dataframe column data types on read
jdbcDF3 = spark.read \
.format("jdbc") \
.option("url", "jdbc:postgresql:dbserver") \
.option("dbtable", "schema.tablename") \
.option("user", "username") \
.option("password", "password") \
.option("customSchema", "id DECIMAL(38, 0), name STRING") \
.load()
# Saving data to a JDBC source
jdbcDF.write \
.format("jdbc") \
.option("url", "jdbc:postgresql:dbserver") \
.option("dbtable", "schema.tablename") \
.option("user", "username") \
.option("password", "password") \
.save()
jdbcDF2.write \
.jdbc("jdbc:postgresql:dbserver", "schema.tablename",
properties={"user": "username", "password": "password"})
# Specifying create table column data types on write
jdbcDF.write \
.option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)") \
.jdbc("jdbc:postgresql:dbserver", "schema.tablename",
properties={"user": "username", "password": "password"})
# $example off:jdbc_dataset$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Spark SQL data source example") \
.getOrCreate()
basic_datasource_example(spark)
parquet_example(spark)
parquet_schema_merging_example(spark)
json_dataset_example(spark)
jdbc_dataset_example(spark)
spark.stop()
|
LPM-HMS/COSMOS-2.0
|
refs/heads/master
|
cosmos/job/drm/drm_lsf.py
|
2
|
import subprocess as sp
import re
import os
from cosmos.job.drm.DRM_Base import DRM
from cosmos.job.drm.util import exit_process_group
from cosmos import TaskStatus
decode_lsf_state = dict(
[
("UNKWN", "process status cannot be determined"),
("PEND", "job is queued and active"),
("PSUSP", "job suspended while pending"),
("RUN", "job is running"),
("SSUSP", "job is system suspended"),
("USUSP", "job is user suspended"),
("DONE", "job finished normally"),
("EXIT", "job finished, but failed"),
]
)
class DRM_LSF(DRM):
name = "lsf"
poll_interval = 5
def submit_job(self, task):
if task.environment_variables is not None:
raise NotImplementedError
ns = " " + task.drm_native_specification if task.drm_native_specification else ""
bsub = "bsub -o {stdout} -e {stderr}{ns} ".format(
stdout=task.output_stdout_path, stderr=task.output_stderr_path, ns=ns
)
        out = ""
        try:
            out = sp.check_output(
                '{bsub} "{cmd_str}"'.format(
                    cmd_str=self.jobmanager.get_command_str(task), bsub=bsub
                ),
                env=os.environ,
                preexec_fn=exit_process_group,
                shell=True,
            ).decode()
            task.drm_jobID = str(int(re.search(r"Job <(\d+)>", out).group(1)))
        except (sp.CalledProcessError, ValueError, AttributeError):
            # AttributeError covers re.search() returning None on unexpected
            # bsub output; out is pre-initialized so the log line cannot fail.
            task.log.error("%s failed submission to %s: %s" % (task, task.drm, out))
            task.status = TaskStatus.failed
else:
task.status = TaskStatus.submitted
def filter_is_done(self, tasks):
if len(tasks):
bjobs = bjobs_all()
def is_done(task):
jid = str(task.drm_jobID)
if jid not in bjobs:
                    # job no longer listed; probably finished and aged out of bjobs history
# print 'missing %s %s' % (task, task.drm_jobID)
return True
else:
return bjobs[jid]["STAT"] in ["DONE", "EXIT", "UNKWN", "ZOMBI"]
return list(filter(is_done, tasks))
else:
return []
def drm_statuses(self, tasks):
"""
:param tasks: tasks that have been submitted to the job manager
:returns: (dict) task.drm_jobID -> drm_status
"""
if len(tasks):
bjobs = bjobs_all()
def f(task):
return bjobs.get(str(task.drm_jobID), dict()).get("STAT", "UNK_JOB_STATE")
return {task.drm_jobID: f(task) for task in tasks}
else:
return {}
def kill(self, task):
"Terminates a task"
raise NotImplementedError
# os.system('bkill {0}'.format(task.drm_jobID))
def kill_tasks(self, tasks):
for t in tasks:
sp.check_call(["bkill", str(t.drm_jobID)], preexec_function=exit_process_group)
def bjobs_all():
"""
    returns a dict keyed by LSF job ids, whose values are a dict of bjobs
    information about the job
"""
try:
        lines = sp.check_output(["bjobs", "-a"], preexec_fn=exit_process_group).decode().split("\n")
except (sp.CalledProcessError, OSError):
return {}
bjobs = {}
    header = re.split(r"\s\s+", lines[0])
    for l in lines[1:]:
        if not l.strip():
            continue
        items = re.split(r"\s\s+", l)
        bjobs[items[0]] = dict(list(zip(header, items)))
return bjobs
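# Illustrative shape of the mapping returned by bjobs_all() (added; actual
# column names depend on the local LSF configuration, so this layout is an
# assumption rather than captured output):
#     {"1234": {"JOBID": "1234", "STAT": "RUN", "QUEUE": "normal", ...}, ...}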
|
marty331/jakesclock
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pyglet/event.py
|
25
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Event dispatch framework.
All objects that produce events in pyglet implement `EventDispatcher`,
providing a consistent interface for registering and manipulating event
handlers. A commonly used event dispatcher is `pyglet.window.Window`.
Event types
===========
For each event dispatcher there is a set of events that it dispatches; these
correspond with the type of event handlers you can attach. Event types are
identified by their name, for example, 'on_resize'. If you are creating a
new class which implements `EventDispatcher`, you must call
`EventDispatcher.register_event_type` for each event type.
Attaching event handlers
========================
An event handler is simply a function or method. You can attach an event
handler by setting the appropriate function on the instance::
def on_resize(width, height):
# ...
dispatcher.on_resize = on_resize
There is also a convenience decorator that reduces typing::
@dispatcher.event
def on_resize(width, height):
# ...
You may prefer to subclass and override the event handlers instead::
class MyDispatcher(DispatcherClass):
def on_resize(self, width, height):
# ...
Event handler stack
===================
When attaching an event handler to a dispatcher using the above methods, it
replaces any existing handler (causing the original handler to no longer be
called). Each dispatcher maintains a stack of event handlers, allowing you to
insert an event handler "above" the existing one rather than replacing it.
There are two main use cases for "pushing" event handlers:
* Temporarily intercepting the events coming from the dispatcher by pushing a
custom set of handlers onto the dispatcher, then later "popping" them all
off at once.
* Creating "chains" of event handlers, where the event propagates from the
top-most (most recently added) handler to the bottom, until a handler
takes care of it.
Use `EventDispatcher.push_handlers` to create a new level in the stack and
attach handlers to it. You can push several handlers at once::
dispatcher.push_handlers(on_resize, on_key_press)
If your function handlers have different names to the events they handle, use
keyword arguments::
dispatcher.push_handlers(on_resize=my_resize,
on_key_press=my_key_press)
After an event handler has processed an event, it is passed on to the
next-lowest event handler, unless the handler returns `EVENT_HANDLED`, which
prevents further propagation.
To remove all handlers on the top stack level, use
`EventDispatcher.pop_handlers`.
Note that any handlers pushed onto the stack have precedence over the
handlers set directly on the instance (for example, using the methods
described in the previous section), regardless of when they were set.
For example, handler ``foo`` is called before handler ``bar`` in the following
example::
dispatcher.push_handlers(on_resize=foo)
dispatcher.on_resize = bar
Dispatching events
==================
pyglet uses a single-threaded model for all application code. Event
handlers are only ever invoked as a result of calling
`EventDispatcher.dispatch_events`.
It is up to the specific event dispatcher to queue relevant events until they
can be dispatched, at which point the handlers are called in the order the
events were originally generated.
This implies that your application runs with a main loop that continuously
updates the application state and checks for new events::
while True:
dispatcher.dispatch_events()
# ... additional per-frame processing
Not all event dispatchers require the call to ``dispatch_events``; check with
the particular class documentation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import inspect
EVENT_HANDLED = True
EVENT_UNHANDLED = None
class EventException(Exception):
'''An exception raised when an event handler could not be attached.
'''
pass
class EventDispatcher(object):
'''Generic event dispatcher interface.
See the module docstring for usage.
'''
# Placeholder empty stack; real stack is created only if needed
_event_stack = ()
@classmethod
def register_event_type(cls, name):
'''Register an event type with the dispatcher.
Registering event types allows the dispatcher to validate event
handler names as they are attached, and to search attached objects for
suitable handlers.
:Parameters:
`name` : str
Name of the event to register.
'''
if not hasattr(cls, 'event_types'):
cls.event_types = []
cls.event_types.append(name)
return name
def push_handlers(self, *args, **kwargs):
'''Push a level onto the top of the handler stack, then attach zero or
more event handlers.
If keyword arguments are given, they name the event type to attach.
Otherwise, a callable's `__name__` attribute will be used. Any other
object may also be specified, in which case it will be searched for
callables with event names.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = []
# Place dict full of new handlers at beginning of stack
self._event_stack.insert(0, {})
self.set_handlers(*args, **kwargs)
def _get_handlers(self, args, kwargs):
'''Implement handler matching on arguments for set_handlers and
remove_handlers.
'''
for object in args:
if inspect.isroutine(object):
# Single magically named function
name = object.__name__
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
yield name, object
else:
# Single instance with magically named methods
for name in dir(object):
if name in self.event_types:
yield name, getattr(object, name)
for name, handler in kwargs.items():
# Function for handling given event (no magic)
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
yield name, handler
def set_handlers(self, *args, **kwargs):
'''Attach one or more event handlers to the top level of the handler
stack.
See `push_handlers` for the accepted argument types.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
for name, handler in self._get_handlers(args, kwargs):
self.set_handler(name, handler)
def set_handler(self, name, handler):
'''Attach a single event handler.
:Parameters:
`name` : str
Name of the event type to attach to.
`handler` : callable
Event handler to attach.
'''
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
self._event_stack[0][name] = handler
def pop_handlers(self):
'''Pop the top level of event handlers off the stack.
'''
        assert self._event_stack, 'No handlers pushed'
del self._event_stack[0]
def remove_handlers(self, *args, **kwargs):
'''Remove event handlers from the event stack.
See `push_handlers` for the accepted argument types. All handlers
are removed from the first stack frame that contains any of the given
handlers. No error is raised if any handler does not appear in that
frame, or if no stack frame contains any of the given handlers.
If the stack frame is empty after removing the handlers, it is
removed from the stack. Note that this interferes with the expected
symmetry of `push_handlers` and `pop_handlers`.
'''
handlers = list(self._get_handlers(args, kwargs))
# Find the first stack frame containing any of the handlers
def find_frame():
for frame in self._event_stack:
for name, handler in handlers:
try:
if frame[name] == handler:
return frame
except KeyError:
pass
frame = find_frame()
# No frame matched; no error.
if not frame:
return
# Remove each handler from the frame.
for name, handler in handlers:
try:
if frame[name] == handler:
del frame[name]
except KeyError:
pass
# Remove the frame if it's empty.
if not frame:
self._event_stack.remove(frame)
def remove_handler(self, name, handler):
'''Remove a single event handler.
The given event handler is removed from the first handler stack frame
it appears in. The handler must be the exact same callable as passed
to `set_handler`, `set_handlers` or `push_handlers`; and the name
must match the event type it is bound to.
No error is raised if the event handler is not set.
:Parameters:
`name` : str
Name of the event type to remove.
`handler` : callable
Event handler to remove.
'''
for frame in self._event_stack:
try:
if frame[name] == handler:
del frame[name]
break
except KeyError:
pass
def dispatch_event(self, event_type, *args):
'''Dispatch a single event to the attached handlers.
        The event is propagated to all handlers from the top of the stack
until one returns `EVENT_HANDLED`. This method should be used only by
`EventDispatcher` implementors; applications should call
the ``dispatch_events`` method.
        Since pyglet 1.2, the method returns `EVENT_HANDLED` if an event
        handler returned `EVENT_HANDLED`, or `EVENT_UNHANDLED` if all event
        handlers returned `EVENT_UNHANDLED`. If no matching event handlers
        are in the stack, ``False`` is returned.
:Parameters:
`event_type` : str
Name of the event.
`args` : sequence
Arguments to pass to the event handler.
:rtype: bool or None
:return: (Since pyglet 1.2) `EVENT_HANDLED` if an event handler
returned `EVENT_HANDLED`; `EVENT_UNHANDLED` if one or more event
handlers were invoked but returned only `EVENT_UNHANDLED`;
            otherwise ``False``. In pyglet 1.1 and earlier, the return value
is always ``None``.
'''
assert event_type in self.event_types, "%r not found in %r.event_types == %r" % (event_type, self, self.event_types)
invoked = False
# Search handler stack for matching event handlers
for frame in list(self._event_stack):
handler = frame.get(event_type, None)
if handler:
try:
invoked = True
if handler(*args):
return EVENT_HANDLED
except TypeError:
self._raise_dispatch_exception(event_type, args, handler)
# Check instance for an event handler
if hasattr(self, event_type):
try:
invoked = True
if getattr(self, event_type)(*args):
return EVENT_HANDLED
except TypeError:
self._raise_dispatch_exception(
event_type, args, getattr(self, event_type))
if invoked:
return EVENT_UNHANDLED
return False
def _raise_dispatch_exception(self, event_type, args, handler):
# A common problem in applications is having the wrong number of
# arguments in an event handler. This is caught as a TypeError in
# dispatch_event but the error message is obfuscated.
#
# Here we check if there is indeed a mismatch in argument count,
# and construct a more useful exception message if so. If this method
# doesn't find a problem with the number of arguments, the error
# is re-raised as if we weren't here.
n_args = len(args)
# Inspect the handler
handler_args, handler_varargs, _, handler_defaults = \
inspect.getargspec(handler)
n_handler_args = len(handler_args)
# Remove "self" arg from handler if it's a bound method
if inspect.ismethod(handler) and handler.im_self:
n_handler_args -= 1
# Allow *args varargs to overspecify arguments
if handler_varargs:
n_handler_args = max(n_handler_args, n_args)
# Allow default values to overspecify arguments
if (n_handler_args > n_args and
handler_defaults and
n_handler_args - len(handler_defaults) <= n_args):
n_handler_args = n_args
if n_handler_args != n_args:
if inspect.isfunction(handler) or inspect.ismethod(handler):
descr = '%s at %s:%d' % (
handler.func_name,
handler.func_code.co_filename,
handler.func_code.co_firstlineno)
else:
descr = repr(handler)
raise TypeError(
'%s event was dispatched with %d arguments, but '
'handler %s has an incompatible function signature' %
(event_type, len(args), descr))
else:
raise
def event(self, *args):
'''Function decorator for an event handler.
Usage::
win = window.Window()
@win.event
def on_resize(self, width, height):
# ...
or::
@win.event('on_resize')
def foo(self, width, height):
# ...
'''
if len(args) == 0: # @window.event()
def decorator(func):
name = func.__name__
self.set_handler(name, func)
return func
return decorator
elif inspect.isroutine(args[0]): # @window.event
func = args[0]
name = func.__name__
self.set_handler(name, func)
return args[0]
elif type(args[0]) in (str, unicode): # @window.event('on_resize')
name = args[0]
def decorator(func):
self.set_handler(name, func)
return func
return decorator
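# Illustrative sketch (added; not part of pyglet): wiring up a minimal
# custom dispatcher with one registered event type.
#     class Clock(EventDispatcher):
#         def tick(self):
#             self.dispatch_event('on_tick')
#     Clock.register_event_type('on_tick')
#     clock = Clock()
#     clock.push_handlers(on_tick=lambda: sys.stdout.write('tick\n'))
#     clock.tick()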
|
xzturn/caffe2
|
refs/heads/master
|
caffe2/python/operator_test/deform_conv_test.py
|
5
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import assume, given
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import unittest
def _cudnn_supports(
dilation=False,
nhwc=False,
):
"""Return True if cuDNN supports this configuration."""
v = workspace.GetCuDNNVersion()
if dilation and v < 6000:
# Dilation not supported until v6
return False
if dilation and nhwc:
# Dilation and NHWC not supported together
return False
return True
def _conv_1d_output_size(size, kernel, pad, dilation, stride):
return max(
1,
int((size + pad * 2 - (dilation * (kernel - 1) + 1)) / stride) + 1
)
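# Illustrative check of the formula above (added): size=7, kernel=3, pad=1,
# dilation=1, stride=2 -> int((7 + 2*1 - 3) / 2) + 1 = 4 output positions.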
def _conv_2d_output_size(size, kernel, pad_h, pad_w, dilation,
stride_h, stride_w):
return [
_conv_1d_output_size(size, kernel, pad_h, dilation, stride_h),
_conv_1d_output_size(size, kernel, pad_w, dilation, stride_w)
]
def _conv_2d_offsets_dims(
batch_size,
size,
kernel,
pad_h,
pad_w,
dilation,
stride_h,
stride_w,
deformable_group
):
dims = [batch_size, 2 * kernel * kernel * deformable_group]
dims.extend(_conv_2d_output_size(size, kernel, pad_h, pad_w,
dilation, stride_h, stride_w))
return dims
def _conv_2d_random_offsets(
batch_size,
kernel,
dims,
num_deformable_group
):
o = []
for y0 in range(0, kernel):
for x0 in range(0, kernel):
# stay away from integer offsets which correspond to "ridges" on the
# interpolated surface resulting in less precise estimates
x = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
y = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
o.append(y - y0)
o.append(x - x0)
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * dims[1]] * dims[0])
return np.array([e] * batch_size).astype(np.float32)
def _conv_2d_shuffle_offsets(
batch_size,
kernel,
dims,
num_deformable_group,
input_channels,
output_channels
):
o = []
w0 = [[0 for x in range(kernel)] for y in range(kernel)]
for y0 in range(0, kernel):
for x0 in range(0, kernel):
x = np.random.randint(0, kernel)
y = np.random.randint(0, kernel)
o.append(y - y0)
o.append(x - x0)
w0[y][x] += 1
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * int(dims[1])] * int(dims[0]))
w0 = [[w0] * input_channels] * output_channels
return (
np.array([e] * batch_size).astype(np.float32),
np.array(w0).astype(np.float32).transpose((0, 2, 3, 1))
)
class TestConvolution(hu.HypothesisTestCase):
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only)
def test_null_offset_convolution(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size,
order, engine, use_bias, deformable_group,
gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
offset_dims = _conv_2d_offsets_dims(batch_size, size, kernel, pad, pad,
dilation, stride, stride,
deformable_group)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
o = np.zeros(tuple(offset_dims), np.float32)
w = np.random.rand(
output_channels, kernel, kernel, input_channels).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
reference_op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 3),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 4),
**hu.gcs_gpu_only)
def test_flat_input_convolution(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size,
order, engine, use_bias,
deformable_group, gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.ones((batch_size, size, size, input_channels), np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad, pad,
dilation, stride, stride)
o = _conv_2d_random_offsets(batch_size, kernel, output_size,
deformable_group)
w = np.ones((output_channels, kernel, kernel, input_channels), np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
reference_op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 1),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
dilation=st.integers(1, 1),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 4),
**hu.gcs_gpu_only)
def test_shuffle_input_convolution(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size,
order, engine, use_bias,
deformable_group, gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad, pad,
dilation, stride, stride)
o, w0 = _conv_2d_shuffle_offsets(batch_size, kernel, output_size,
deformable_group, input_channels,
output_channels)
w = np.ones((output_channels, kernel, kernel, input_channels), np.float32)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
w0 = w0.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
with core.DeviceScope(gc):
workspace.FeedBlob("w0", w0)
reference_op = core.CreateOperator(
"Conv",
["X", "w0", "b"] if use_bias else ["X", "w0"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
# CUDNN does NOT support different padding values and we skip it
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride_h=st.integers(1, 3),
stride_w=st.integers(1, 3),
pad_h=st.integers(0, 3),
pad_w=st.integers(0, 3),
kernel=st.integers(2, 5),
size=st.integers(1, 8),
input_channels=st.integers(1, 3),
output_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "EIGEN"]),
shared_buffer=st.booleans(),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only)
def test_conv_separate_stride_pad_gradients(self, stride_h, stride_w,
pad_h, pad_w, kernel, size,
input_channels, output_channels,
batch_size, order, engine,
shared_buffer, use_bias,
deformable_group, gc, dc):
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride_h=stride_h,
stride_w=stride_w,
pad_t=pad_h,
pad_l=pad_w,
pad_b=pad_h,
pad_r=pad_w,
kernel=kernel,
order=order,
engine=engine,
shared_buffer=int(shared_buffer),
deformable_group=deformable_group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad_h, pad_w, 1,
stride_h, stride_w)
o = _conv_2d_random_offsets(batch_size, kernel, output_size,
deformable_group)
w = np.random.rand(
output_channels, kernel, kernel, input_channels).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
if size + pad_h < kernel or size + pad_w < kernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only)
def test_conv_gradients(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size, order,
engine, use_bias, deformable_group, gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad, pad,
dilation, stride, stride)
o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
w = np.random.rand(
output_channels, kernel, kernel, input_channels).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
alexthered/kienhoc-platform
|
refs/heads/master
|
common/djangoapps/django_comment_common/models.py
|
11
|
import logging
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.utils.translation import ugettext_noop
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule_django.models import CourseKeyField, NoneToEmptyManager
FORUM_ROLE_ADMINISTRATOR = ugettext_noop('Administrator')
FORUM_ROLE_MODERATOR = ugettext_noop('Moderator')
FORUM_ROLE_COMMUNITY_TA = ugettext_noop('Community TA')
FORUM_ROLE_STUDENT = ugettext_noop('Student')
@receiver(post_save, sender=CourseEnrollment)
def assign_default_role_on_enrollment(sender, instance, **kwargs):
"""
Assign forum default role 'Student'
"""
# The code below would remove all forum Roles from a user when they unenroll
# from a course. Concerns were raised that it should apply only to students,
# or that even the history of student roles is important for research
# purposes. Since this was new functionality being added in this release,
# I'm just going to comment it out for now and let the forums team deal with
# implementing the right behavior.
#
# # We've unenrolled the student, so remove all roles for this course
# if not instance.is_active:
# course_roles = list(Role.objects.filter(course_id=instance.course_id))
# instance.user.roles.remove(*course_roles)
# return
# We've enrolled the student, so make sure they have the Student role
assign_default_role(instance.course_id, instance.user)
def assign_default_role(course_id, user):
"""
Assign forum default role 'Student' to user
"""
role, __ = Role.objects.get_or_create(course_id=course_id, name=FORUM_ROLE_STUDENT)
user.roles.add(role)
class Role(models.Model):
objects = NoneToEmptyManager()
name = models.CharField(max_length=30, null=False, blank=False)
users = models.ManyToManyField(User, related_name="roles")
course_id = CourseKeyField(max_length=255, blank=True, db_index=True)
class Meta(object):
# use existing table that was originally created from django_comment_client app
db_table = 'django_comment_client_role'
def __unicode__(self):
# pylint: disable=no-member
return self.name + " for " + (self.course_id.to_deprecated_string() if self.course_id else "all courses")
# TODO the name of this method is a little bit confusing,
# since it's one-off and doesn't handle inheritance later
def inherit_permissions(self, role):
"""
Make this role inherit permissions from the given role.
Permissions are only added, not removed. Does not handle inheritance.
"""
if role.course_id and role.course_id != self.course_id:
logging.warning(
"%s cannot inherit permissions from %s due to course_id inconsistency",
self,
role,
)
for per in role.permissions.all():
self.add_permission(per)
def add_permission(self, permission):
self.permissions.add(Permission.objects.get_or_create(name=permission)[0])
def has_permission(self, permission):
"""Returns True if this role has the given permission, False otherwise."""
course = modulestore().get_course(self.course_id)
if course is None:
raise ItemNotFoundError(self.course_id)
if permission_blacked_out(course, {self.name}, permission):
return False
return self.permissions.filter(name=permission).exists() # pylint: disable=no-member
class Permission(models.Model):
name = models.CharField(max_length=30, null=False, blank=False, primary_key=True)
roles = models.ManyToManyField(Role, related_name="permissions")
class Meta(object):
# use existing table that was originally created from django_comment_client app
db_table = 'django_comment_client_permission'
def __unicode__(self):
return self.name
def permission_blacked_out(course, role_names, permission_name):
"""Returns true if a user in course with the given roles would have permission_name blacked out.
This will return true if it is a permission that the user might have normally had for the course, but does not have
right this moment because we are in a discussion blackout period (as defined by the settings on the course module).
Namely, they can still view, but they can't edit, update, or create anything. This only applies to students, as
moderators of any kind still have posting privileges during discussion blackouts.
"""
return (
not course.forum_posts_allowed and
role_names == {FORUM_ROLE_STUDENT} and
any([permission_name.startswith(prefix) for prefix in ['edit', 'update', 'create']])
)
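# Illustrative sketch (not in the original module): for a course whose
# forum_posts_allowed flag is False, a pure-student role set loses any
# write-style permission while read-style permissions survive. The
# permission names below are placeholders.
#
#   permission_blacked_out(course, {FORUM_ROLE_STUDENT}, 'edit_content')    # True
#   permission_blacked_out(course, {FORUM_ROLE_STUDENT}, 'vote')            # False
#   permission_blacked_out(course, {FORUM_ROLE_MODERATOR}, 'edit_content')  # False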
def all_permissions_for_user_in_course(user, course_id): # pylint: disable=invalid-name
"""Returns all the permissions the user has in the given course."""
course = modulestore().get_course(course_id)
if course is None:
raise ItemNotFoundError(course_id)
all_roles = {role.name for role in Role.objects.filter(users=user, course_id=course_id)}
permissions = {
permission.name
for permission
in Permission.objects.filter(roles__users=user, roles__course_id=course_id)
if not permission_blacked_out(course, all_roles, permission.name)
}
return permissions
|
stanley-cheung/grpc
|
refs/heads/master
|
test/cpp/naming/gen_build_yaml.py
|
8
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the naming tests."""
import yaml
import collections
import hashlib
import json
_LOCAL_DNS_SERVER_ADDRESS = '127.0.0.1:15353'
def _append_zone_name(name, zone_name):
return '%s.%s' % (name, zone_name)
def _build_expected_addrs_cmd_arg(expected_addrs):
out = []
for addr in expected_addrs:
out.append('%s,%s' % (addr['address'], str(addr['is_balancer'])))
return ';'.join(out)
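# Example (illustrative, not part of the generator): the helper above joins
# "address,is_balancer" pairs with ';', so
#   _build_expected_addrs_cmd_arg([
#       {'address': '1.2.3.4:443', 'is_balancer': False},
#       {'address': '5.6.7.8:443', 'is_balancer': True},
#   ])
# returns '1.2.3.4:443,False;5.6.7.8:443,True'.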
def _resolver_test_cases(resolver_component_data):
out = []
for test_case in resolver_component_data['resolver_component_tests']:
target_name = _append_zone_name(
test_case['record_to_resolve'],
resolver_component_data['resolver_tests_common_zone_name'])
out.append({
'test_title':
target_name,
'arg_names_and_values': [
('target_name', target_name),
('do_ordered_address_comparison',
test_case['do_ordered_address_comparison']),
('expected_addrs',
_build_expected_addrs_cmd_arg(test_case['expected_addrs'])),
('expected_chosen_service_config',
(test_case['expected_chosen_service_config'] or '')),
('expected_service_config_error',
(test_case['expected_service_config_error'] or '')),
('expected_lb_policy', (test_case['expected_lb_policy'] or '')),
('enable_srv_queries', test_case['enable_srv_queries']),
('enable_txt_queries', test_case['enable_txt_queries']),
('inject_broken_nameserver_list',
test_case['inject_broken_nameserver_list']),
],
})
return out
def main():
resolver_component_data = ''
with open('test/cpp/naming/resolver_test_record_groups.yaml') as f:
resolver_component_data = yaml.load(f, Loader=yaml.FullLoader)
    # Named json_out to avoid shadowing the json module imported above.
    json_out = {
        'resolver_tests_common_zone_name':
            resolver_component_data['resolver_tests_common_zone_name'],
        # this data is required by the resolver_component_tests_runner.py.template
        'resolver_component_test_cases':
            _resolver_test_cases(resolver_component_data),
    }
    print(yaml.dump(json_out))
if __name__ == '__main__':
main()
|
alivecor/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/reduce_ops_test.py
|
94
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reduction operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ReduceOpsTest(XLATestCase):
def _testReduction(self, tf_reduce_fn, np_reduce_fn, dtype, test_inputs,
rtol=1e-4, atol=1e-4):
"""Tests that the output of 'tf_reduce_fn' matches numpy's output."""
for test_input in test_inputs:
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = tf_reduce_fn(a, index)
result = sess.run(out, {a: test_input, index: [0]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=0),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [-1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [-33]})
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [2]})
FLOAT_DATA = [
np.zeros(shape=(2, 0)),
np.zeros(shape=(0, 30)),
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
NONEMPTY_FLOAT_DATA = [
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
BOOL_DATA = [
np.array([], dtype=np.bool).reshape(2, 0),
np.array([], dtype=np.bool).reshape(0, 3),
np.array([[False, True, False], [True, True, False]]),
]
def testReduceSum(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.float32,
self.FLOAT_DATA)
def testReduceProd(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.float32,
self.FLOAT_DATA)
def testReduceMin(self):
def reference_min(inp, axis):
"""Wrapper around np.amin that returns +infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
return np.amin(inp, axis)
self._testReduction(math_ops.reduce_min, reference_min, np.float32,
self.FLOAT_DATA)
def testReduceMax(self):
def reference_max(inp, axis):
"""Wrapper around np.amax that returns -infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf'))
return np.amax(inp, axis)
self._testReduction(math_ops.reduce_max, reference_max, np.float32,
self.FLOAT_DATA)
def testReduceMean(self):
# TODO(phawkins): mean on XLA currently returns 0 instead of NaN when
# reducing across zero inputs.
self._testReduction(math_ops.reduce_mean, np.mean, np.float32,
self.NONEMPTY_FLOAT_DATA)
def testReduceAll(self):
self._testReduction(math_ops.reduce_all, np.all, np.bool, self.BOOL_DATA)
def testReduceAny(self):
self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA)
if __name__ == '__main__':
googletest.main()
|
idl3r/ktsan
|
refs/heads/tsan
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
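# Hedged usage sketch (not part of the original library): create_event()
# dispatches purely on buffer length, so zero-filled placeholder buffers of
# the right size are enough to select the class.
#
#   ev = create_event('cycles', 'bash', 'libc.so', 'main', '\x00' * 144)
#   # -> PebsEvent
#   ev = create_event('cycles', 'bash', 'libc.so', 'main', '\x00' * 176)
#   # -> PebsNHM; any other length falls back to plain PerfEvent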
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
|
ODM2/ODMToolsPython
|
refs/heads/master
|
odmtools/gui/frmFlagValues.py
|
1
|
# Boa:Dialog:frmFlagValues
import wx
from odmtools.odmdata import Qualifier
def create(parent):
return frmFlagValues(parent)
NEW = "[New Qualifier]"
class frmFlagValues(wx.Dialog):
def _init_coll_boxSizer1_Items(self, parent):
# generated method, don't edit
parent.AddWindow(self.splitter, 85, border=0, flag=wx.EXPAND)
parent.AddWindow(self.panel1, 15, border=0, flag=wx.EXPAND)
def _init_coll_boxSizer2_Items(self, parent):
# generated method, don't edit
parent.AddWindow(self.pnlFiller, 60, border=0, flag=0)
parent.AddWindow(self.btnOK, 0, border=0, flag=0)
parent.AddSpacer(wx.Size(8, 8), border=0, flag=0)
parent.AddWindow(self.btnCancel, 0, border=0, flag=0)
def _init_sizers(self):
# generated method, don't edit
self.boxSizer1 = wx.BoxSizer(orient=wx.VERTICAL)
self.boxSizer2 = wx.BoxSizer(orient=wx.HORIZONTAL)
self._init_coll_boxSizer1_Items(self.boxSizer1)
self._init_coll_boxSizer2_Items(self.boxSizer2)
self.SetSizer(self.boxSizer1)
self.panel1.SetSizer(self.boxSizer2)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Dialog.__init__(self, name=u'frmFlagValues', parent=prnt, pos=wx.Point(554, 325),
size=wx.Size(507, 206), style=wx.DEFAULT_DIALOG_STYLE, title=u'Flag Values')
self.SetClientSize(wx.Size(491, 168))
self.splitter = wx.SplitterWindow(name='splitter', parent=self,
pos=wx.Point(0, 0), size=wx.Size(491, 142), style=wx.BORDER_SUNKEN)
self.splitter.SetLabel(u'')
self.pnlSelect = wx.Panel( name='pnlSelect', parent=self.splitter,
pos=wx.Point(0, 0), size=wx.Size(491, 10), style=wx.TAB_TRAVERSAL)
self.pnlCreate = wx.Panel( name='pnlCreate', parent=self.splitter,
pos=wx.Point(0, 14), size=wx.Size(491, 178), style=wx.TAB_TRAVERSAL)
self.pnlCreate.Show(False)
self.lblQualifier = wx.StaticText( label=u'Qualifier:', name=u'lblQualifier',
parent=self.pnlSelect, pos=wx.Point(16, 8), size=wx.Size(45, 13), style=0)
self.cbQualif = wx.ComboBox(choices=[x for x in self.qualchoices.keys()] + [NEW], name=u'cbQualif',
parent=self.pnlSelect, pos=wx.Point(16, 25), size=wx.Size(376, 21),
style=wx.CB_READONLY, value=self.selectedValue)
self.cbQualif.Bind(wx.EVT_COMBOBOX, self.OnCbQualifCombobox)
self.showNewFields()
self.lblCode = wx.StaticText( label=u'Code:', name=u'lblCode',
parent=self.pnlCreate, pos=wx.Point(16, 8), size=wx.Size(30, 13), style=0)
self.lblDesc = wx.StaticText( label=u'Description:', name=u'lblDesc',
parent=self.pnlCreate, pos=wx.Point(16, 35), size=wx.Size(58, 13), style=0)
self.txtCode = wx.TextCtrl( name=u'txtCode', parent=self.pnlCreate,
pos=wx.Point(75, 8), size=wx.Size(100, 21), style=0, value=u'')
self.txtDesc = wx.TextCtrl( name=u'txtDesc', parent=self.pnlCreate,
pos=wx.Point(75, 35), size=wx.Size(296, 24), style=0, value=u'')
self.panel1 = wx.Panel(name='panel1', parent=self, pos=wx.Point(0, 142),
size=wx.Size(491, 26), style=wx.TAB_TRAVERSAL)
self.pnlFiller = wx.Panel( name=u'pnlFiller', parent=self.panel1,
pos=wx.Point(0, 0), size=wx.Size(333, 100), style=wx.TAB_TRAVERSAL)
self.btnOK = wx.Button(label=u'OK', name=u'btnOK', parent=self.panel1,
pos=wx.Point(333, 0), size=wx.Size(75, 23), style=0)
self.btnOK.Bind(wx.EVT_BUTTON, self.OnBtnOKButton)
self.btnCancel = wx.Button( label=u'Cancel', name=u'btnCancel',
parent=self.panel1, pos=wx.Point(416, 0), size=wx.Size(75, 23), style=0)
self.btnCancel.Bind(wx.EVT_BUTTON, self.OnBtnCancelButton)
self.splitter.Initialize(self.pnlSelect)
self._init_sizers()
def __init__(self, parent, series_service, choices, isNew=False):
self.series_service = series_service
self.qualchoices = choices
        # The len(choices) check covers cases where there are no flags in the
        # database; the desired behavior is to automatically select creation
        # of a new flag.
        if isNew or len(choices) == 0:
self.selectedValue = NEW
wx.CallAfter(self.showNewFields)
else:
self.selectedValue=choices.keys()[0]
self._init_ctrls(parent)
def GetValue(self):
try:
if self.qid:
return self.qid
else:
return None
except AttributeError:
return None
def OnCbQualifCombobox(self, event):
self.showNewFields()
event.Skip()
def showNewFields(self):
if self.cbQualif.GetValue() == NEW:
self.splitter.SplitHorizontally(self.pnlSelect, self.pnlCreate, 50)
else:
if self.splitter.IsSplit():
self.splitter.Unsplit(self.pnlCreate)
self.splitter.Initialize(self.pnlSelect)
def OnBtnOKButton(self, event):
# new
if self.splitter.IsSplit():
code = self.txtCode.GetValue()
desc = self.txtDesc.GetValue()
if not code or not desc:
msg = "Please double check that you have entered values into the code field and description field"
dlg = wx.MessageDialog(None, msg, "Code or Description cannot be empty",
wx.OK | wx.OK_DEFAULT | wx.ICON_WARNING)
dlg.ShowModal()
return
'''
q = Qualifier()
q.code = code
q.description = desc
'''
q=self.series_service.create_qualifier(code, desc)
self.qid = q.id
self.selectedValue = q.code + '-' + q.description
self.EndModal(wx.ID_OK)
else:
value = self.cbQualif.GetValue()
if value:
self.qid = self.qualchoices[value]
self.selectedValue = self.cbQualif.GetValue()
self.EndModal(wx.ID_OK)
event.Skip()
def OnBtnCancelButton(self, event):
self.EndModal(wx.ID_CANCEL)
event.Skip()
|
sclabs/sccms-nonrel
|
refs/heads/master
|
django/contrib/gis/db/backends/oracle/introspection.py
|
388
|
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute('SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper()))
row = cursor.fetchone()
except Exception, msg:
raise Exception('Could not find entry in USER_SDO_GEOM_METADATA corresponding to "%s"."%s"\n'
'Error message: %s.' % (table_name, geo_col, msg))
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
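# Illustrative note (not in the original module): a metadata row with SRID
# 4326 and a two-element SDO_DIM_ARRAY yields ('GeometryField', {}) because
# both values match Django's defaults, while SRID 3857 with three dimensions
# would yield ('GeometryField', {'srid': 3857, 'dim': 3}).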
|
rvmoura96/projeto-almoxarifado
|
refs/heads/master
|
myvenv/Lib/site-packages/django/contrib/flatpages/migrations/0001_initial.py
|
308
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlatPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('content', models.TextField(verbose_name='content', blank=True)),
('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
('template_name', models.CharField(
help_text=(
"Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use "
"'flatpages/default.html'."
), max_length=70, verbose_name='template name', blank=True
)),
('registration_required', models.BooleanField(
default=False, help_text='If this is checked, only logged-in users will be able to view the page.',
verbose_name='registration required'
)),
('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
],
options={
'ordering': ('url',),
'db_table': 'django_flatpage',
'verbose_name': 'flat page',
'verbose_name_plural': 'flat pages',
},
bases=(models.Model,),
),
]
|
betoesquivel/fil2014
|
refs/heads/master
|
filenv/lib/python2.7/site-packages/django/utils/archive.py
|
229
|
"""
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
"""
Base exception class for all archive errors.
"""
class UnrecognizedArchiveFormat(ArchiveException):
"""
Error raised when passed file is not a recognized archive format.
"""
def extract(path, to_path=''):
"""
Unpack the tar or zip file at the specified path to the directory
specified by to_path.
"""
with Archive(path) as archive:
archive.extract(to_path)
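# Example (illustrative; the file name is hypothetical):
#   extract('release.tar.gz', '/tmp/release')
# unpacks the archive into /tmp/release, dispatching to TarArchive through
# the extension_map defined at the bottom of this module.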
class Archive(object):
"""
The external API class that encapsulates an archive implementation.
"""
def __init__(self, file):
self._archive = self._archive_cls(file)(file)
@staticmethod
def _archive_cls(file):
cls = None
if isinstance(file, six.string_types):
filename = file
else:
try:
filename = file.name
except AttributeError:
raise UnrecognizedArchiveFormat(
"File object not a recognized archive format.")
base, tail_ext = os.path.splitext(filename.lower())
cls = extension_map.get(tail_ext)
if not cls:
base, ext = os.path.splitext(base)
cls = extension_map.get(ext)
if not cls:
raise UnrecognizedArchiveFormat(
"Path not a recognized archive format: %s" % filename)
return cls
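    # For a double extension such as 'proj.tar.gz', the first splitext()
    # yields '.gz' (no direct map entry), and the second pass maps the
    # remaining '.tar' to TarArchive.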
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def extract(self, to_path=''):
self._archive.extract(to_path)
def list(self):
self._archive.list()
def close(self):
self._archive.close()
class BaseArchive(object):
"""
Base Archive class. Implementations should inherit this class.
"""
def split_leading_dir(self, path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(self, paths):
"""
Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)
"""
common_prefix = None
for path in paths:
prefix, rest = self.split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
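    # Illustrative behavior (not in the original): a member list such as
    # ['pkg/setup.py', 'pkg/src/mod.py'] shares the leading component 'pkg'
    # and returns True, while ['setup.py', 'pkg/mod.py'] returns False since
    # the entries do not share a common leading component.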
def extract(self):
raise NotImplementedError
def list(self):
raise NotImplementedError
class TarArchive(BaseArchive):
def __init__(self, file):
self._archive = tarfile.open(file)
def list(self, *args, **kwargs):
self._archive.list(*args, **kwargs)
def extract(self, to_path):
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
members = [member for member in self._archive.getmembers()
if member.name != 'pax_global_header']
leading = self.has_leading_dir(members)
for member in members:
name = member.name
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
if member.isdir():
if filename and not os.path.exists(filename):
os.makedirs(filename)
else:
                # Ensure `extracted` is bound for the finally clause below,
                # even if extractfile() raises.
                extracted = None
                try:
extracted = self._archive.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
print("In the tar file %s the member %s is invalid: %s" %
(name, member.name, exc))
else:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(extracted, outfile)
finally:
if extracted:
extracted.close()
def close(self):
self._archive.close()
class ZipArchive(BaseArchive):
def __init__(self, file):
self._archive = zipfile.ZipFile(file)
def list(self, *args, **kwargs):
self._archive.printdir(*args, **kwargs)
def extract(self, to_path):
namelist = self._archive.namelist()
leading = self.has_leading_dir(namelist)
for name in namelist:
data = self._archive.read(name)
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if filename.endswith(('/', '\\')):
# A directory
if not os.path.exists(filename):
os.makedirs(filename)
else:
with open(filename, 'wb') as outfile:
outfile.write(data)
def close(self):
self._archive.close()
extension_map = {
'.tar': TarArchive,
'.tar.bz2': TarArchive,
'.tar.gz': TarArchive,
'.tgz': TarArchive,
'.tz2': TarArchive,
'.zip': ZipArchive,
}
|
xiaoixa/python
|
refs/heads/master
|
agmcs/0010/0010.py
|
40
|
from PIL import Image, ImageDraw, ImageFont
import random
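# Draws four random letters on a 120x50 canvas, overlays random lines and
# per-pixel noise, and saves the result as a simple CAPTCHA-style image.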
im = Image.new('RGBA',(120,50),(255,255,255))
text = random.sample('abcdefghijklmnopqrstuvwxyz\
ABCDEFGHIJKLMNOPQRSTUVWXYZ',4)
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("msyh.ttf",40)
x=0
y=0
for i in xrange(200):
x1 = random.randint(0,120)
y1 = random.randint(0,50)
x2 = random.randint(0,120)
y2 = random.randint(0,50)
fill = (random.randint(130,250),random.randint(130,250),random.randint(130,250))
draw.line(((x1,y1),(x2,y2)),fill=fill)
for word in text:
fill = (random.randint(0,130),random.randint(0,130),random.randint(0,130))
draw.text((x,y),word,font=font,fill=fill)
x+=30
for i in xrange(1000):
x1 = random.randint(0,119)
y1 = random.randint(0,49)
fill = (random.randint(20,250),random.randint(20,250),random.randint(20,250))
im.putpixel((x1,y1),fill)
im.save('1.jpg')
|
jeremysalwen/Ecasound-LV2
|
refs/heads/LV2
|
pyecasound/eci.py
|
1
|
"""
# eci.ECI -- A higher-level interface to pyeca.
# Copyright 2001 Eric S. Tiedemann (est@hyperreal.org)
# GPLed
some updates by Janne Halttunen
"""
import pyeca as _pyeca
import types as _types
class ECIError(Exception):
def __init__(self, what):
Exception.__init__(self, what)
self.what = what
def __str__(self):
        return '<ECIError %s>' % self.what
class ECI:
"""An ECI is and ECA Control Interface object.
It can be called with ECI command strings (and an optional
float value) as arguments. A list or tuple of command
strings is also accepted and commands can be separated
by newlines within a single string.
The value of a command (or of the last command in a sequence)
if returned as a value of the appropriate Python type
(possibly None).
On errors, an ECIException is raised that has a `what'
member with the exception message. These exceptions also
stringify prettily.
"""
def __init__(self, *args):
self.e = apply(_pyeca.ECA_CONTROL_INTERFACE, args)
def __call__(self, cmd, f=None):
if f != None:
self.e.command_float_arg(cmd, f)
else:
if type(cmd) == _types.ListType or type(cmd) == _types.TupleType:
v = None
for c in cmd:
v = self(c)
return v
else:
cmds = cmd.split('\n')
if len(cmds) > 1:
v = None
for c in cmds:
v = self(c)
return v
else:
self.e.command(cmd)
t = self.e.last_type()
if not t or t == '-':
return None
elif t == 'S':
return self.e.last_string_list()
elif t == 's':
return self.e.last_string()
elif t == 'f':
return self.e.last_float()
elif t == 'i':
return self.e.last_integer()
elif t == 'li':
return self.e.last_long_integer()
elif t == 'e' or self.e.error():
raise ECIError, '%s: %s' % (self.e.last_error(), cmd)
else:
raise ECIError, "unknown return type '%s'!" % t
if __name__ == '__main__':
import time, sys
file = sys.argv[1]
e = ECI()
# uncomment to raise an error :)
#e('foo')
e("""
cs-add play_chainsetup
c-add 1st_chain
ai-add %s
ao-add /dev/dsp
cop-add -efl:100
cop-select 1
copp-select 1
cs-connect
start"""
% file)
cutoff_inc = 500.0
while 1:
time.sleep(1)
if e("engine-status") != "running" or e("get-position") > 15:
break
e("copp-set", cutoff_inc + e("copp-get"))
e("""stop
cs-disconnect""")
print "Chain operator status: ", e("cop-status")
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/313_test_resource.py
|
6
|
import unittest
from test import support
import time
resource = support.import_module('resource')
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
def test_args(self):
self.assertRaises(TypeError, resource.getrlimit)
self.assertRaises(TypeError, resource.getrlimit, 42, 42)
self.assertRaises(TypeError, resource.setrlimit)
self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)
def test_fsize_ismax(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
# number on a platform with large file support. On these platforms,
# we need to test that the get/setrlimit functions properly convert
# the number to a C long long and that the conversion doesn't raise
# an error.
self.assertEqual(resource.RLIM_INFINITY, max)
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
def test_fsize_enforced(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# Check to see what happens when the RLIMIT_FSIZE is small. Some
# versions of Python were terminated by an uncaught SIGXFSZ, but
# pythonrun.c has been fixed to ignore that exception. If so, the
# write() should return EFBIG when the limit is exceeded.
# At least one platform has an unlimited RLIMIT_FSIZE and attempts
# to change it raise ValueError instead.
try:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
limit_set = True
except ValueError:
limit_set = False
f = open(support.TESTFN, "wb")
try:
f.write(b"X" * 1024)
try:
f.write(b"Y")
f.flush()
# On some systems (e.g., Ubuntu on hppa) the flush()
# doesn't always cause the exception, but the close()
# does eventually. Try flushing several times in
# an attempt to ensure the file is really synced and
# the exception raised.
for i in range(5):
time.sleep(.1)
f.flush()
except IOError:
if not limit_set:
raise
if limit_set:
# Close will attempt to flush the byte we wrote
# Restore limit first to avoid getting a spurious error
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
finally:
f.close()
finally:
if limit_set:
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
support.unlink(support.TESTFN)
def test_fsize_toobig(self):
# Be sure that setrlimit is checking for really large values
too_big = 10**50
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
except (OverflowError, ValueError):
pass
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
except (OverflowError, ValueError):
pass
def test_getrusage(self):
self.assertRaises(TypeError, resource.getrusage)
self.assertRaises(TypeError, resource.getrusage, 42, 42)
usageself = resource.getrusage(resource.RUSAGE_SELF)
usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
# May not be available on all systems.
try:
usageboth = resource.getrusage(resource.RUSAGE_BOTH)
except (ValueError, AttributeError):
pass
def test_main(verbose=None):
support.run_unittest(ResourceTest)
if __name__ == "__main__":
test_main()
|